summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.tito/packages/openshift-ansible2
-rw-r--r--.tito/releasers.conf4
-rw-r--r--.travis.yml18
-rw-r--r--CONTRIBUTING.md70
-rw-r--r--DEPLOYMENT_TYPES.md19
-rw-r--r--Dockerfile9
-rw-r--r--Dockerfile.rhel753
-rw-r--r--README_CONTAINER_IMAGE.md16
-rw-r--r--callback_plugins/openshift_quick_installer.py83
-rw-r--r--examples/README.md93
-rw-r--r--examples/certificate-check-upload.yaml47
-rw-r--r--examples/certificate-check-volume.yaml54
-rw-r--r--examples/scheduled-certcheck-upload.yaml53
-rw-r--r--examples/scheduled-certcheck-volume.yaml58
-rw-r--r--filter_plugins/oo_filters.py101
-rw-r--r--filter_plugins/openshift_version.py129
-rw-r--r--inventory/byo/hosts.openstack2
-rw-r--r--inventory/byo/hosts.origin.example52
-rw-r--r--inventory/byo/hosts.ose.example50
-rw-r--r--openshift-ansible.spec748
-rw-r--r--playbooks/aws/openshift-cluster/config.yml1
-rw-r--r--playbooks/byo/openshift-cluster/config.yml1
-rw-r--r--playbooks/byo/openshift-cluster/openshift-logging.yml1
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml4
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml1
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml2
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_6/README.md18
l---------playbooks/byo/openshift-cluster/upgrades/v3_6/roles1
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml111
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml115
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml104
-rw-r--r--playbooks/byo/openshift-etcd/restart.yml2
-rw-r--r--playbooks/byo/openshift-master/restart.yml2
-rw-r--r--playbooks/byo/openshift-master/scaleup.yml1
-rw-r--r--playbooks/byo/openshift-node/restart.yml2
-rw-r--r--playbooks/byo/openshift-node/scaleup.yml1
-rw-r--r--playbooks/byo/rhel_subscribe.yml2
-rw-r--r--playbooks/certificate_expiry/easy-mode-upload.yaml40
-rw-r--r--playbooks/certificate_expiry/html_and_json_timestamp.yaml16
-rw-r--r--playbooks/common/openshift-cluster/config.yml7
-rw-r--r--playbooks/common/openshift-cluster/disable_excluder.yml4
-rw-r--r--playbooks/common/openshift-cluster/enable_dnsmasq.yml2
-rw-r--r--playbooks/common/openshift-cluster/initialize_facts.yml2
-rw-r--r--playbooks/common/openshift-cluster/initialize_openshift_version.yml7
-rw-r--r--playbooks/common/openshift-cluster/openshift_hosted.yml8
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates/ca.yml2
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates/registry.yml9
-rw-r--r--playbooks/common/openshift-cluster/reset_excluder.yml2
-rw-r--r--playbooks/common/openshift-cluster/std_include.yml2
-rw-r--r--playbooks/common/openshift-cluster/update_repos_and_packages.yml2
-rw-r--r--playbooks/common/openshift-cluster/upgrades/disable_excluder.yml2
-rw-r--r--playbooks/common/openshift-cluster/upgrades/init.yml3
-rw-r--r--playbooks/common/openshift-cluster/upgrades/post_control_plane.yml1
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml35
-rw-r--r--playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml23
-rw-r--r--playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml12
-rw-r--r--playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml4
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml6
l---------playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins1
l---------playbooks/common/openshift-cluster/upgrades/v3_6/roles1
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml18
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml10
-rw-r--r--playbooks/common/openshift-master/config.yml10
-rw-r--r--playbooks/common/openshift-master/scaleup.yml8
-rw-r--r--playbooks/common/openshift-node/config.yml2
-rw-r--r--playbooks/common/openshift-node/scaleup.yml8
-rw-r--r--playbooks/gce/openshift-cluster/config.yml1
-rw-r--r--playbooks/libvirt/openshift-cluster/config.yml1
-rw-r--r--playbooks/openstack/openshift-cluster/config.yml1
-rw-r--r--playbooks/openstack/openshift-cluster/files/heat_stack.yaml10
-rw-r--r--pytest.ini1
-rw-r--r--roles/calico/README.md28
-rw-r--r--roles/calico/defaults/main.yaml10
-rw-r--r--roles/calico/handlers/main.yml8
-rw-r--r--roles/calico/meta/main.yml16
-rw-r--r--roles/calico/tasks/main.yml74
-rw-r--r--roles/calico/templates/calico.cfg.j29
-rw-r--r--roles/calico/templates/calico.conf.j218
-rw-r--r--roles/calico/templates/calico.service.j229
-rw-r--r--roles/calico_master/README.md28
-rw-r--r--roles/calico_master/defaults/main.yaml2
-rw-r--r--roles/calico_master/meta/main.yml17
-rw-r--r--roles/calico_master/tasks/main.yml41
-rw-r--r--roles/calico_master/templates/calico-policy-controller.yml.j2105
-rw-r--r--roles/cockpit-ui/tasks/main.yml13
-rw-r--r--roles/etcd/meta/main.yml1
-rw-r--r--roles/etcd/tasks/system_container.yml4
-rw-r--r--roles/etcd/templates/etcd.conf.j26
-rw-r--r--roles/etcd_client_certificates/tasks/main.yml2
-rw-r--r--roles/etcd_server_certificates/meta/main.yml2
-rw-r--r--roles/etcd_server_certificates/tasks/main.yml4
-rw-r--r--roles/kube_nfs_volumes/README.md114
-rw-r--r--roles/kube_nfs_volumes/defaults/main.yml16
-rw-r--r--roles/kube_nfs_volumes/handlers/main.yml3
-rw-r--r--roles/kube_nfs_volumes/library/partitionpool.py247
-rw-r--r--roles/kube_nfs_volumes/meta/main.yml17
-rw-r--r--roles/kube_nfs_volumes/tasks/main.yml34
-rw-r--r--roles/kube_nfs_volumes/tasks/nfs.yml23
l---------roles/kube_nfs_volumes/templates/v1/nfs.json.j21
-rw-r--r--roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j223
-rw-r--r--roles/lib_openshift/library/oc_adm_ca_server_cert.py260
-rw-r--r--roles/lib_openshift/library/oc_adm_manage_node.py (renamed from roles/lib_openshift/library/oadm_manage_node.py)263
-rw-r--r--roles/lib_openshift/library/oc_adm_policy_group.py291
-rw-r--r--roles/lib_openshift/library/oc_adm_policy_user.py308
-rw-r--r--roles/lib_openshift/library/oc_adm_registry.py264
-rw-r--r--roles/lib_openshift/library/oc_adm_router.py249
-rw-r--r--roles/lib_openshift/library/oc_clusterrole.py1807
-rw-r--r--roles/lib_openshift/library/oc_configmap.py1628
-rw-r--r--roles/lib_openshift/library/oc_edit.py247
-rw-r--r--roles/lib_openshift/library/oc_env.py247
-rw-r--r--roles/lib_openshift/library/oc_group.py1607
-rw-r--r--roles/lib_openshift/library/oc_image.py1533
-rw-r--r--roles/lib_openshift/library/oc_label.py251
-rw-r--r--roles/lib_openshift/library/oc_obj.py292
-rw-r--r--roles/lib_openshift/library/oc_objectvalidator.py247
-rw-r--r--roles/lib_openshift/library/oc_process.py259
-rw-r--r--roles/lib_openshift/library/oc_project.py306
-rw-r--r--roles/lib_openshift/library/oc_pvc.py1780
-rw-r--r--roles/lib_openshift/library/oc_route.py247
-rw-r--r--roles/lib_openshift/library/oc_scale.py247
-rw-r--r--roles/lib_openshift/library/oc_secret.py247
-rw-r--r--roles/lib_openshift/library/oc_service.py247
-rw-r--r--roles/lib_openshift/library/oc_serviceaccount.py247
-rw-r--r--roles/lib_openshift/library/oc_serviceaccount_secret.py247
-rw-r--r--roles/lib_openshift/library/oc_user.py1761
-rw-r--r--roles/lib_openshift/library/oc_version.py247
-rw-r--r--roles/lib_openshift/library/oc_volume.py2071
-rw-r--r--roles/lib_openshift/src/ansible/oc_adm_ca_server_cert.py1
-rw-r--r--roles/lib_openshift/src/ansible/oc_adm_manage_node.py (renamed from roles/lib_openshift/src/ansible/oadm_manage_node.py)0
-rw-r--r--roles/lib_openshift/src/ansible/oc_clusterrole.py29
-rw-r--r--roles/lib_openshift/src/ansible/oc_configmap.py32
-rw-r--r--roles/lib_openshift/src/ansible/oc_group.py32
-rw-r--r--roles/lib_openshift/src/ansible/oc_image.py34
-rw-r--r--roles/lib_openshift/src/ansible/oc_obj.py2
-rw-r--r--roles/lib_openshift/src/ansible/oc_pvc.py35
-rw-r--r--roles/lib_openshift/src/ansible/oc_user.py34
-rw-r--r--roles/lib_openshift/src/ansible/oc_volume.py41
-rw-r--r--roles/lib_openshift/src/class/oc_adm_ca_server_cert.py6
-rw-r--r--roles/lib_openshift/src/class/oc_adm_manage_node.py (renamed from roles/lib_openshift/src/class/oadm_manage_node.py)2
-rw-r--r--roles/lib_openshift/src/class/oc_adm_policy_group.py44
-rw-r--r--roles/lib_openshift/src/class/oc_adm_policy_user.py61
-rw-r--r--roles/lib_openshift/src/class/oc_adm_registry.py10
-rw-r--r--roles/lib_openshift/src/class/oc_adm_router.py2
-rw-r--r--roles/lib_openshift/src/class/oc_clusterrole.py163
-rw-r--r--roles/lib_openshift/src/class/oc_configmap.py191
-rw-r--r--roles/lib_openshift/src/class/oc_group.py148
-rw-r--r--roles/lib_openshift/src/class/oc_image.py91
-rw-r--r--roles/lib_openshift/src/class/oc_label.py4
-rw-r--r--roles/lib_openshift/src/class/oc_obj.py43
-rw-r--r--roles/lib_openshift/src/class/oc_process.py12
-rw-r--r--roles/lib_openshift/src/class/oc_project.py59
-rw-r--r--roles/lib_openshift/src/class/oc_pvc.py167
-rw-r--r--roles/lib_openshift/src/class/oc_user.py227
-rw-r--r--roles/lib_openshift/src/class/oc_volume.py195
-rw-r--r--roles/lib_openshift/src/doc/ca_server_cert6
-rw-r--r--roles/lib_openshift/src/doc/clusterrole66
-rw-r--r--roles/lib_openshift/src/doc/configmap72
-rw-r--r--roles/lib_openshift/src/doc/group56
-rw-r--r--roles/lib_openshift/src/doc/image75
-rw-r--r--roles/lib_openshift/src/doc/manage_node6
-rw-r--r--roles/lib_openshift/src/doc/pvc76
-rw-r--r--roles/lib_openshift/src/doc/user128
-rw-r--r--roles/lib_openshift/src/doc/volume105
-rwxr-xr-xroles/lib_openshift/src/generate.py9
-rw-r--r--roles/lib_openshift/src/lib/base.py44
-rw-r--r--roles/lib_openshift/src/lib/clusterrole.py68
-rw-r--r--roles/lib_openshift/src/lib/group.py36
-rw-r--r--roles/lib_openshift/src/lib/pvc.py167
-rw-r--r--roles/lib_openshift/src/lib/rule.py144
-rw-r--r--roles/lib_openshift/src/lib/user.py37
-rw-r--r--roles/lib_openshift/src/lib/volume.py7
-rw-r--r--roles/lib_openshift/src/sources.yml84
-rwxr-xr-xroles/lib_openshift/src/test/integration/group.yml229
-rwxr-xr-xroles/lib_openshift/src/test/integration/oc_adm_manage_node.yml (renamed from roles/lib_openshift/src/test/integration/oadm_manage_node.yml)8
-rwxr-xr-xroles/lib_openshift/src/test/integration/oc_clusterrole.yml106
-rwxr-xr-xroles/lib_openshift/src/test/integration/oc_configmap.yml95
-rwxr-xr-xroles/lib_openshift/src/test/integration/oc_obj.yml207
-rwxr-xr-xroles/lib_openshift/src/test/integration/oc_user.yml240
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_adm_manage_node.py (renamed from roles/lib_openshift/src/test/unit/test_oadm_manage_node.py)14
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_clusterrole.py115
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_configmap.py239
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_group.py253
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_image.py280
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_project.py200
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_pvc.py366
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_route.py2
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_user.py127
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_volume.py633
-rw-r--r--roles/lib_utils/library/yedit.py237
-rw-r--r--roles/lib_utils/src/ansible/yedit.py24
-rw-r--r--roles/lib_utils/src/class/yedit.py198
-rw-r--r--roles/lib_utils/src/doc/yedit16
-rw-r--r--roles/lib_utils/src/test/integration/kube-manager-test.yaml.orig52
-rwxr-xr-xroles/lib_utils/src/test/integration/yedit.yml31
-rwxr-xr-xroles/lib_utils/src/test/unit/test_yedit.py86
-rw-r--r--roles/nuage_master/tasks/main.yaml9
-rw-r--r--roles/nuage_master/tasks/serviceaccount.yml14
-rw-r--r--roles/openshift_ca/README.md2
-rw-r--r--roles/openshift_ca/defaults/main.yml3
-rw-r--r--roles/openshift_ca/tasks/main.yml6
-rw-r--r--roles/openshift_certificate_expiry/README.md133
-rw-r--r--roles/openshift_certificate_expiry/library/openshift_cert_expiry.py5
-rw-r--r--roles/openshift_common/tasks/main.yml9
-rw-r--r--roles/openshift_etcd_ca/meta/main.yml (renamed from roles/openshift_serviceaccounts/meta/main.yml)11
-rw-r--r--roles/openshift_etcd_ca/tasks/main.yml1
-rwxr-xr-xroles/openshift_examples/examples-sync.sh9
l---------roles/openshift_examples/files/examples/latest2
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/redis-ephemeral-template.json17
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/redis-persistent-template.json17
-rw-r--r--roles/openshift_examples/files/examples/v1.5/image-streams/dotnet_imagestreams.json15
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-example.json333
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json544
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-ephemeral-template.json10
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-persistent-template.json10
-rw-r--r--roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-app-example.yaml13
-rw-r--r--roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-example.yaml13
-rw-r--r--roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-template.yaml479
-rw-r--r--roles/openshift_examples/files/examples/v1.6/db-templates/README.md76
-rw-r--r--roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-ephemeral-template.json229
-rw-r--r--roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-persistent-template.json253
-rw-r--r--roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-ephemeral-template.json258
-rw-r--r--roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-persistent-template.json282
-rw-r--r--roles/openshift_examples/files/examples/v1.6/db-templates/mysql-ephemeral-template.json258
-rw-r--r--roles/openshift_examples/files/examples/v1.6/db-templates/mysql-persistent-template.json260
-rw-r--r--roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-ephemeral-template.json240
-rw-r--r--roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-persistent-template.json264
-rw-r--r--roles/openshift_examples/files/examples/v1.6/db-templates/redis-ephemeral-template.json211
-rw-r--r--roles/openshift_examples/files/examples/v1.6/db-templates/redis-persistent-template.json235
-rw-r--r--roles/openshift_examples/files/examples/v1.6/image-streams/dotnet_imagestreams.json79
-rw-r--r--roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-centos7.json829
-rw-r--r--roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-rhel7.json736
-rw-r--r--roles/openshift_examples/files/examples/v1.6/quickstart-templates/README.md27
-rw-r--r--roles/openshift_examples/files/examples/v1.6/quickstart-templates/apicast-gateway-template.yml149
-rw-r--r--roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql-persistent.json582
-rw-r--r--roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql.json556
-rw-r--r--roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql-persistent.json523
-rw-r--r--roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql.json497
-rw-r--r--roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql-persistent.json536
-rw-r--r--roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql.json510
-rw-r--r--roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-example.json333
-rw-r--r--roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-pgsql-persistent.json544
-rw-r--r--roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-ephemeral-template.json289
-rw-r--r--roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-persistent-template.json313
-rw-r--r--roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb-persistent.json545
-rw-r--r--roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb.json521
-rw-r--r--roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql-persistent.json602
-rw-r--r--roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql.json576
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-streams/fis-image-streams.json76
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-streams/jboss-image-streams.json397
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-basic.json321
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent-ssl.json549
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent.json371
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-ssl.json503
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-basic.json332
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-https.json501
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql-persistent.json779
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql.json739
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql-persistent.json756
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql.json716
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-basic-s2i.json415
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-extensions-support-s2i.json763
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-secure-s2i.json642
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-amq-s2i.json686
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-basic-s2i.json339
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-https-s2i.json473
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-amq-s2i.json696
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-basic-s2i.json339
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-https-s2i.json473
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-persistent-s2i.json813
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-s2i.json760
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-basic-s2i.json340
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-https-s2i.json525
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-persistent-s2i.json781
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-s2i.json741
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-persistent-s2i.json792
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-s2i.json752
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-persistent-s2i.json769
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-s2i.json729
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-sso-s2i.json756
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-persistent-s2i.json813
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-s2i.json760
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-basic-s2i.json351
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-https-s2i.json536
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-persistent-s2i.json792
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-s2i.json752
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-persistent-s2i.json807
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-s2i.json767
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-persistent-s2i.json784
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-s2i.json744
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-sso-s2i.json767
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-basic-s2i.json284
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-https-s2i.json398
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json654
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json614
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json656
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json616
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json633
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json593
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-basic-s2i.json284
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-https-s2i.json398
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json654
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json614
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json656
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json616
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json633
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json591
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-amq-template.json362
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-log-template.json336
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-rest-sql-template.json421
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-cxf-rest-template.json385
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/openjdk18-web-basic-s2i.json267
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json1079
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-s2i.json959
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json1052
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-s2i.json932
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-basic-s2i.json345
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-persistent-s2i.json792
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-s2i.json716
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json765
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-s2i.json689
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-amq-template.json331
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-config-template.json327
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-drools-template.json334
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-infinispan-template.json315
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-rest-sql-template.json403
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-teiid-template.json343
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-template.json305
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-xml-template.json305
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json305
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxws-template.json305
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-https.json514
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql-persistent.json750
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql.json719
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql-persistent.json727
-rw-r--r--roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql.json696
-rw-r--r--roles/openshift_excluder/README.md2
-rw-r--r--roles/openshift_excluder/tasks/adjust.yml23
-rw-r--r--roles/openshift_excluder/tasks/disable.yml37
-rw-r--r--roles/openshift_excluder/tasks/enable.yml9
-rw-r--r--roles/openshift_excluder/tasks/exclude.yml22
-rw-r--r--roles/openshift_excluder/tasks/install.yml4
-rw-r--r--roles/openshift_excluder/tasks/main.yml2
-rw-r--r--roles/openshift_excluder/tasks/status.yml84
-rw-r--r--roles/openshift_excluder/tasks/unexclude.yml19
-rwxr-xr-xroles/openshift_facts/library/openshift_facts.py27
-rw-r--r--roles/openshift_facts/meta/main.yml3
-rw-r--r--roles/openshift_facts/tasks/main.yml9
-rw-r--r--roles/openshift_facts/vars/Fedora.yml6
-rw-r--r--roles/openshift_facts/vars/default.yml6
-rw-r--r--roles/openshift_facts/vars/main.yml7
-rw-r--r--roles/openshift_health_checker/action_plugins/openshift_health_check.py67
-rw-r--r--roles/openshift_health_checker/callback_plugins/zz_failure_summary.py34
-rwxr-xr-xroles/openshift_health_checker/library/aos_version.py1
-rwxr-xr-xroles/openshift_health_checker/library/check_yum_update.py1
-rw-r--r--roles/openshift_health_checker/library/docker_container.py2036
-rw-r--r--roles/openshift_health_checker/library/docker_info.py24
-rw-r--r--roles/openshift_health_checker/openshift_checks/__init__.py46
-rw-r--r--roles/openshift_health_checker/openshift_checks/docker_image_availability.py179
-rw-r--r--roles/openshift_health_checker/openshift_checks/package_availability.py2
-rw-r--r--roles/openshift_health_checker/openshift_checks/package_update.py2
-rw-r--r--roles/openshift_health_checker/openshift_checks/package_version.py2
-rw-r--r--roles/openshift_health_checker/test/action_plugin_test.py227
-rw-r--r--roles/openshift_health_checker/test/conftest.py10
-rw-r--r--roles/openshift_health_checker/test/docker_image_availability_test.py28
-rw-r--r--roles/openshift_health_checker/test/mixins_test.py23
-rw-r--r--roles/openshift_health_checker/test/openshift_check_test.py99
-rw-r--r--roles/openshift_health_checker/test/package_availability_test.py49
-rw-r--r--roles/openshift_health_checker/test/package_update_test.py16
-rw-r--r--roles/openshift_health_checker/test/package_version_test.py21
-rw-r--r--roles/openshift_hosted/README.md1
-rw-r--r--roles/openshift_hosted/defaults/main.yml10
-rw-r--r--roles/openshift_hosted/filter_plugins/filters.py13
-rw-r--r--roles/openshift_hosted/meta/main.yml18
-rw-r--r--roles/openshift_hosted/tasks/main.yml7
-rw-r--r--roles/openshift_hosted/tasks/registry/registry.yml16
-rw-r--r--roles/openshift_hosted/tasks/registry/secure.yml4
-rw-r--r--roles/openshift_hosted/tasks/router/router.yml50
-rw-r--r--roles/openshift_hosted/templates/registry_config.j22
-rw-r--r--roles/openshift_hosted/vars/main.yml10
-rw-r--r--roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml7
-rw-r--r--roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml2
-rw-r--r--roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml3
-rw-r--r--roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml345
-rw-r--r--roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml168
-rw-r--r--roles/openshift_hosted_templates/files/v1.6/enterprise/registry-console.yaml124
-rw-r--r--roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml342
-rw-r--r--roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml168
-rw-r--r--roles/openshift_hosted_templates/files/v1.6/origin/registry-console.yaml124
-rw-r--r--roles/openshift_logging/README.md5
-rw-r--r--roles/openshift_logging/defaults/main.yml30
-rw-r--r--roles/openshift_logging/files/generate-jks.sh2
-rw-r--r--roles/openshift_logging/filter_plugins/openshift_logging.py13
-rw-r--r--roles/openshift_logging/meta/main.yaml1
-rw-r--r--roles/openshift_logging/tasks/generate_configmaps.yaml29
-rw-r--r--roles/openshift_logging/tasks/generate_jks.yaml10
-rw-r--r--roles/openshift_logging/tasks/generate_pems.yaml3
-rw-r--r--roles/openshift_logging/tasks/generate_pvcs.yaml6
-rw-r--r--roles/openshift_logging/tasks/generate_routes.yaml57
-rw-r--r--roles/openshift_logging/tasks/generate_secrets.yaml8
-rw-r--r--roles/openshift_logging/tasks/install_elasticsearch.yaml26
-rw-r--r--roles/openshift_logging/tasks/main.yaml24
-rw-r--r--roles/openshift_logging/tasks/procure_server_certs.yaml36
-rw-r--r--roles/openshift_logging/templates/elasticsearch-logging.yml.j2 (renamed from roles/openshift_logging/files/elasticsearch-logging.yml)33
-rw-r--r--roles/openshift_logging/templates/elasticsearch.yml.j210
-rw-r--r--roles/openshift_logging/templates/es-storage-emptydir.partial1
-rw-r--r--roles/openshift_logging/templates/es-storage-hostpath.partial2
-rw-r--r--roles/openshift_logging/templates/es-storage-pvc.partial2
-rw-r--r--roles/openshift_logging/templates/es.j27
-rw-r--r--roles/openshift_logging/templates/pvc.j22
-rw-r--r--roles/openshift_logging/vars/default_images.yml3
-rw-r--r--roles/openshift_logging/vars/main.yaml4
-rw-r--r--roles/openshift_logging/vars/openshift-enterprise.yml3
-rw-r--r--roles/openshift_manage_node/tasks/main.yml2
-rw-r--r--roles/openshift_master/meta/main.yml3
-rw-r--r--roles/openshift_master/tasks/main.yml2
-rw-r--r--roles/openshift_master/tasks/system_container.yml4
-rw-r--r--roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j22
-rw-r--r--roles/openshift_master/templates/master.yaml.v1.j218
-rw-r--r--roles/openshift_master/templates/master_docker/master.docker.service.j22
-rw-r--r--roles/openshift_master_certificates/README.md1
-rw-r--r--roles/openshift_master_certificates/defaults/main.yml2
-rw-r--r--roles/openshift_master_certificates/tasks/main.yml6
-rw-r--r--roles/openshift_master_facts/filter_plugins/openshift_master.py17
-rw-r--r--roles/openshift_metrics/defaults/main.yaml4
-rwxr-xr-xroles/openshift_metrics/files/import_jks_certs.sh59
-rw-r--r--roles/openshift_metrics/handlers/main.yml26
-rw-r--r--roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml51
-rw-r--r--roles/openshift_metrics/tasks/import_jks_certs.yaml19
-rw-r--r--roles/openshift_metrics/tasks/install_cassandra.yaml9
-rw-r--r--roles/openshift_metrics/tasks/install_metrics.yaml2
-rw-r--r--roles/openshift_metrics/tasks/main.yaml12
-rw-r--r--roles/openshift_metrics/tasks/update_master_config.yaml9
-rw-r--r--roles/openshift_metrics/templates/hawkular_cassandra_rc.j225
-rw-r--r--roles/openshift_metrics/templates/pvc.j24
-rw-r--r--roles/openshift_metrics/templates/secret.j26
-rw-r--r--roles/openshift_metrics/vars/default_images.yml3
-rw-r--r--roles/openshift_metrics/vars/main.yaml1
-rw-r--r--roles/openshift_metrics/vars/openshift-enterprise.yml3
-rw-r--r--roles/openshift_node/defaults/main.yml4
-rw-r--r--roles/openshift_node/meta/main.yml5
-rw-r--r--roles/openshift_node/tasks/main.yml2
-rw-r--r--roles/openshift_node/tasks/node_system_container.yml4
-rw-r--r--roles/openshift_node/tasks/openvswitch_system_container.yml4
-rw-r--r--roles/openshift_node/templates/node.yaml.v1.j29
-rw-r--r--roles/openshift_node/templates/openshift.docker.node.service4
-rw-r--r--roles/openshift_node_certificates/README.md1
-rw-r--r--roles/openshift_node_certificates/defaults/main.yml2
-rw-r--r--roles/openshift_node_certificates/tasks/main.yml6
-rw-r--r--roles/openshift_node_upgrade/tasks/main.yml21
-rw-r--r--roles/openshift_node_upgrade/templates/openshift.docker.node.service2
-rw-r--r--roles/openshift_projects/tasks/main.yml47
-rw-r--r--roles/openshift_projects/vars/main.yml2
-rw-r--r--roles/openshift_repos/meta/main.yml3
-rw-r--r--roles/openshift_repos/tasks/main.yaml4
-rw-r--r--roles/openshift_sanitize_inventory/README.md37
-rw-r--r--roles/openshift_sanitize_inventory/meta/main.yml (renamed from roles/openshift_projects/meta/main.yml)10
-rw-r--r--roles/openshift_sanitize_inventory/tasks/main.yml35
-rw-r--r--roles/openshift_sanitize_inventory/vars/main.yml (renamed from roles/openshift_repos/vars/main.yml)0
-rw-r--r--roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml38
-rw-r--r--roles/openshift_serviceaccounts/tasks/main.yml28
-rw-r--r--roles/openshift_serviceaccounts/templates/serviceaccount.j24
-rw-r--r--roles/openshift_version/tasks/main.yml93
-rw-r--r--roles/openshift_version/tasks/set_version_containerized.yml21
-rw-r--r--roles/openshift_version/tasks/set_version_rpm.yml34
-rw-r--r--roles/os_firewall/tasks/firewall/firewalld.yml16
-rw-r--r--test/openshift_version_tests.py72
-rw-r--r--utils/src/ooinstall/ansible_plugins/facts_callback.py14
467 files changed, 93160 insertions, 3845 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 3b7826d31..49ecde422 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.5.3-1 ./
+3.6.21-1 ./
diff --git a/.tito/releasers.conf b/.tito/releasers.conf
index 032212b24..b52e4fd87 100644
--- a/.tito/releasers.conf
+++ b/.tito/releasers.conf
@@ -32,6 +32,10 @@ releaser = tito.release.DistGitReleaser
branches = rhaos-3.5-rhel-7
srpm_disttag = .el7aos
+[aos-3.6]
+releaser = tito.release.DistGitReleaser
+branches = rhaos-3.6-rhel-7
+srpm_disttag = .el7aos
[copr-openshift-ansible]
releaser = tito.release.CoprReleaser
diff --git a/.travis.yml b/.travis.yml
index 0698b0280..245202139 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -20,21 +20,3 @@ script:
after_success:
- coveralls
-
-notifications:
- email:
- recipients:
- - jdetiber@redhat.com
- - sdodson@redhat.com
- on_success: change
- on_failure: always
- irc:
- channels:
- - chat.freenode.net#openshift-dev
- on_success: change
- on_failure: always
- template:
- - "%{repository}#%{build_number} (%{branch} - %{commit} : %{author}): %{message}"
- - "Change view : %{compare_url}"
- - "Build details : %{build_url}"
- - "sdodson jdetiber: ^"
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 12f3efc09..50bb09470 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -75,7 +75,10 @@ See the [RPM build instructions](BUILD.md).
We use [tox](http://readthedocs.org/docs/tox/) to manage virtualenvs and run
tests. Alternatively, tests can be run using
[detox](https://pypi.python.org/pypi/detox/) which allows for running tests in
-parallel
+parallel.
+
+Note: while `detox` may be useful in development to make use of multiple cores,
+it can be buggy at times and produce flakes, thus we do not use it in our CI.
```
@@ -95,43 +98,90 @@ by `tox`:
$ find . -path '*/bin/python' | grep -vF .tox
```
-Extraneous virtualenvs cause tools such as `pylint` to take a very long time
-going through files that are part of the virtualenv.
+The reason for this recommendation is that extraneous virtualenvs cause tools
+such as `pylint` to take a very long time going through files that are part of
+the virtualenv, and test discovery to go through lots of irrelevant files and
+potentially fail.
---
List the test environments available:
+
```
tox -l
```
-Run all of the tests with:
+Run all of the tests and linters with:
+
```
tox
```
-Run all of the tests in parallel with detox:
+Run all of the tests linters in parallel (may flake):
+
```
detox
```
-Running a particular test environment (python 2.7 flake8 tests in this case):
+### Run only unit tests or some specific linter
+
+Run a particular test environment (`flake8` on Python 2.7 in this case):
+
```
tox -e py27-flake8
```
-Running a particular test environment in a clean virtualenv (python 3.5 pylint
-tests in this case):
+Run a particular test environment in a clean virtualenv (`pylint` on Python 3.5
+in this case):
+
```
-tox -r -e py35-pylint
+tox -re py35-pylint
```
-If you want to enter the virtualenv created by tox to do additional
+### Tricks
+
+#### Activating a virtualenv managed by tox
+
+If you want to enter a virtualenv created by tox to do additional
testing/debugging (py27-flake8 env in this case):
+
```
source .tox/py27-flake8/bin/activate
```
+#### Limiting the unit tests that are run
+
+During development, it might be useful to constantly run just a single test file
+or test method, or to pass custom arguments to `pytest`:
+
+```
+tox -e py27-unit -- path/to/test/file.py
+```
+
+Anything after `--` is passed directly to `pytest`. To learn more about what
+other flags you can use, try:
+
+```
+tox -e py27-unit -- -h
+```
+
+As a practical example, the snippet below shows how to list all tests in a
+certain file, and then execute only one test of interest:
+
+```
+$ tox -e py27-unit -- roles/lib_openshift/src/test/unit/test_oc_project.py --collect-only --no-cov
+...
+collected 1 items
+<Module 'roles/lib_openshift/src/test/unit/test_oc_project.py'>
+ <UnitTestCase 'OCProjectTest'>
+ <TestCaseFunction 'test_adding_a_project'>
+...
+$ tox -e py27-unit -- roles/lib_openshift/src/test/unit/test_oc_project.py -k test_adding_a_project
+```
+
+Among other things, this can be used for instance to see the coverage levels of
+individual modules as we work on improving tests.
+
## Submitting contributions
1. Go through the guides from the [introduction](#Introduction).
diff --git a/DEPLOYMENT_TYPES.md b/DEPLOYMENT_TYPES.md
index 668d14fc0..42ac5635a 100644
--- a/DEPLOYMENT_TYPES.md
+++ b/DEPLOYMENT_TYPES.md
@@ -1,18 +1,17 @@
#Deployment Types
-This module supports OpenShift Origin, OpenShift Enterprise, and Atomic
-Enterprise Platform. Each deployment type sets various defaults used throughout
-your environment.
+This module supports OpenShift Origin and OpenShift Enterprise Each deployment
+type sets various defaults used throughout your environment.
The table below outlines the defaults per `deployment_type`.
-| deployment_type | origin | enterprise (< 3.1) | atomic-enterprise | openshift-enterprise (>= 3.1) |
-|-----------------------------------------------------------------|------------------------------------------|----------------------------------------|----------------------------------|----------------------------------|
-| **openshift.common.service_type** (also used for package names) | origin | openshift | atomic-openshift | |
-| **openshift.common.config_base** | /etc/origin | /etc/openshift | /etc/origin | /etc/origin |
-| **openshift.common.data_dir** | /var/lib/origin | /var/lib/openshift | /var/lib/origin | /var/lib/origin |
-| **openshift.master.registry_url openshift.node.registry_url** | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} | aos3/aos-${component}:${version} | aos3/aos-${component}:${version} |
-| **Image Streams** | centos | rhel + xpaas | N/A | rhel |
+| deployment_type | origin | enterprise (< 3.1) | openshift-enterprise (>= 3.1) |
+|-----------------------------------------------------------------|------------------------------------------|----------------------------------------|----------------------------------|
+| **openshift.common.service_type** (also used for package names) | origin | openshift | |
+| **openshift.common.config_base** | /etc/origin | /etc/openshift | /etc/origin |
+| **openshift.common.data_dir** | /var/lib/origin | /var/lib/openshift | /var/lib/origin |
+| **openshift.master.registry_url openshift.node.registry_url** | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} | aos3/aos-${component}:${version} |
+| **Image Streams** | centos | rhel + xpaas | rhel |
**NOTE** `enterprise` deployment type is used for OpenShift Enterprise version
diff --git a/Dockerfile b/Dockerfile
index c6593491d..eecf3630b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -14,6 +14,15 @@ LABEL name="openshift-ansible" \
io.openshift.expose-services="" \
io.openshift.tags="openshift,install,upgrade,ansible"
+USER root
+
+RUN INSTALL_PKGS="skopeo" && \
+ yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \
+ rpm -V $INSTALL_PKGS && \
+ yum clean all
+
+USER ${USER_UID}
+
# The playbook to be run is specified via the PLAYBOOK_FILE env var.
# This sets a default of openshift_facts.yml as it's an informative playbook
# that can help test that everything is set properly (inventory, sshkeys)
diff --git a/Dockerfile.rhel7 b/Dockerfile.rhel7
index f3d45837a..0d5a6038a 100644
--- a/Dockerfile.rhel7
+++ b/Dockerfile.rhel7
@@ -1,26 +1,41 @@
-FROM rhel7
+FROM openshift3/playbook2image
-MAINTAINER Troy Dawson <tdawson@redhat.com>
+MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
-LABEL Name="openshift3/installer"
-LABEL Vendor="Red Hat" License=GPLv2+
-LABEL Version="v3.1.1.901"
-LABEL Release="6"
-LABEL BZComponent="aos3-installation-docker"
-LABEL Architecture="x86_64"
-LABEL io.k8s.description="Ansible code and playbooks for installing Openshift Container Platform." \
- io.k8s.display-name="Openshift Installer" \
- io.openshift.tags="openshift,installer"
+LABEL name="openshift3/openshift-ansible" \
+ summary="OpenShift's installation and configuration tool" \
+ description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
+ url="https://github.com/openshift/openshift-ansible" \
+ io.k8s.display-name="openshift-ansible" \
+ io.k8s.description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
+ io.openshift.expose-services="" \
+ io.openshift.tags="openshift,install,upgrade,ansible" \
+ com.redhat.component="aos3-installation-docker" \
+ version="v3.4.1" \
+ release="1" \
+ architecture="x86_64"
-RUN INSTALL_PKGS="atomic-openshift-utils" && \
- yum install -y --enablerepo=rhel-7-server-ose-3.2-rpms $INSTALL_PKGS && \
- rpm -V $INSTALL_PKGS && \
+# Playbooks, roles and their dependencies are installed from packages.
+# Unlike in Dockerfile, we don't invoke the 'assemble' script here
+# because all content and dependencies (like 'oc') is already
+# installed via yum.
+USER root
+RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients" && \
+ yum repolist > /dev/null && \
+ yum-config-manager --enable rhel-7-server-ose-3.4-rpms && \
+ yum install -y $INSTALL_PKGS && \
yum clean all
-# Expect user to mount a workdir for container output (installer.cfg, hosts inventory, ansible log)
-VOLUME /var/lib/openshift-installer/
-WORKDIR /var/lib/openshift-installer/
+USER ${USER_UID}
-RUN mkdir -p /var/lib/openshift-installer/
+# The playbook to be run is specified via the PLAYBOOK_FILE env var.
+# This sets a default of openshift_facts.yml as it's an informative playbook
+# that can help test that everything is set properly (inventory, sshkeys).
+# As the playbooks are installed via packages instead of being copied to
+# $APP_HOME by the 'assemble' script, we set the WORK_DIR env var to the
+# location of openshift-ansible.
+ENV PLAYBOOK_FILE=playbooks/byo/openshift_facts.yml \
+ WORK_DIR=/usr/share/ansible/openshift-ansible \
+ OPTS="-v"
-ENTRYPOINT ["/usr/bin/atomic-openshift-installer", "-c", "/var/lib/openshift-installer/installer.cfg", "--ansible-log-path", "/var/lib/openshift-installer/ansible.log"]
+CMD [ "/usr/libexec/s2i/run" ]
diff --git a/README_CONTAINER_IMAGE.md b/README_CONTAINER_IMAGE.md
index f62fc2ab9..2499e01d4 100644
--- a/README_CONTAINER_IMAGE.md
+++ b/README_CONTAINER_IMAGE.md
@@ -2,7 +2,7 @@
The [Dockerfile](Dockerfile) in this repository uses the [playbook2image](https://github.com/aweiteka/playbook2image) source-to-image base image to containerize `openshift-ansible`. The resulting image can run any of the provided playbooks.
-**Note**: at this time there are known issues that prevent to run this image for installation/upgrade purposes from within one of the hosts that is also an installation target at the same time: if the playbook you want to run attempts to manage the docker daemon and restart it (like install/upgrade playbooks do) this would kill the container itself during its operation.
+**Note**: at this time there are known issues that prevent to run this image for installation/upgrade purposes (i.e. run one of the config/upgrade playbooks) from within one of the hosts that is also an installation target at the same time: if the playbook you want to run attempts to manage the docker daemon and restart it (like install/upgrade playbooks do) this would kill the container itself during its operation.
## Build
@@ -11,7 +11,7 @@ To build a container image of `openshift-ansible`:
1. Using standalone **Docker**:
cd openshift-ansible
- docker build -t openshift-ansible .
+ docker build -t openshift/openshift-ansible .
1. Using an **OpenShift** build:
@@ -20,15 +20,15 @@ To build a container image of `openshift-ansible`:
## Usage
-The base image provides several options to control the behaviour of the containers. For more details on these options see the [playbook2image](https://github.com/aweiteka/playbook2image) documentation.
+The `playbook2image` base image provides several options to control the behaviour of the containers. For more details on these options see the [playbook2image](https://github.com/aweiteka/playbook2image) documentation.
At the very least, when running a container using an image built this way you must specify:
-1. The **playbook** to run. This is set using the `PLAYBOOK_FILE` environment variable.
1. An **inventory** file. This can be mounted inside the container as a volume and specified with the `INVENTORY_FILE` environment variable. Alternatively you can serve the inventory file from a web server and use the `INVENTORY_URL` environment variable to fetch it.
1. **ssh keys** so that Ansible can reach your hosts. These should be mounted as a volume under `/opt/app-root/src/.ssh`
+1. The **playbook** to run. This is set using the `PLAYBOOK_FILE` environment variable. If you don't specify a playbook the [`openshift_facts`](playbooks/byo/openshift_facts.yml) playbook will be run to collecting and show facts about your OpenShift environment.
-Here is an example of how to run a containerized `openshift-ansible` playbook that will check the expiration dates of OpenShift's internal certificates using the [`openshift_certificate_expiry` role](../../roles/openshift_certificate_expiry). The inventory and ssh keys are mounted as volumes (the latter requires setting the uid in the container and SELinux label in the key file via `:Z` so they can be accessed) and the `PLAYBOOK_FILE` environment variable is set to point to an example certificate check playbook that is already part of the image:
+Here is an example of how to run a containerized `openshift-ansible` playbook that will check the expiration dates of OpenShift's internal certificates using the [`openshift_certificate_expiry` role](roles/openshift_certificate_expiry). The inventory and ssh keys are mounted as volumes (the latter requires setting the uid in the container and SELinux label in the key file via `:Z` so they can be accessed) and the `PLAYBOOK_FILE` environment variable is set to point to an example certificate check playbook that is already part of the image:
docker run -u `id -u` \
-v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z \
@@ -36,6 +36,8 @@ Here is an example of how to run a containerized `openshift-ansible` playbook th
-e INVENTORY_FILE=/tmp/inventory \
-e OPTS="-v" \
-e PLAYBOOK_FILE=playbooks/certificate_expiry/default.yaml \
- openshift-ansible
+ openshift/openshift-ansible
-The [playbook2image examples](https://github.com/aweiteka/playbook2image/tree/master/examples) provide additional information on how to use a built image.
+Further usage examples are available in the [examples directory](examples/).
+
+Additional usage information for images built from `playbook2image` like this one can be found in the [playbook2image examples](https://github.com/aweiteka/playbook2image/tree/master/examples).
diff --git a/callback_plugins/openshift_quick_installer.py b/callback_plugins/openshift_quick_installer.py
index b4c7edd38..c0fdbc650 100644
--- a/callback_plugins/openshift_quick_installer.py
+++ b/callback_plugins/openshift_quick_installer.py
@@ -54,6 +54,12 @@ class CallbackModule(CallbackBase):
plays_count = 0
plays_total_ran = 0
+ def __init__(self):
+ """Constructor, ensure standard self.*s are set"""
+ self._play = None
+ self._last_task_banner = None
+ super(CallbackModule, self).__init__()
+
def banner(self, msg, color=None):
'''Prints a header-looking line with stars taking up to 80 columns
of width (3 columns, minimum)
@@ -68,6 +74,29 @@ class CallbackModule(CallbackBase):
stars = "*" * star_len
self._display.display("\n%s %s" % (msg, stars), color=color, log_only=True)
+ def _print_task_banner(self, task):
+ """Imported from the upstream 'default' callback"""
+ # args can be specified as no_log in several places: in the task or in
+ # the argument spec. We can check whether the task is no_log but the
+ # argument spec can't be because that is only run on the target
+ # machine and we haven't run it thereyet at this time.
+ #
+ # So we give people a config option to affect display of the args so
+ # that they can secure this if they feel that their stdout is insecure
+ # (shoulder surfing, logging stdout straight to a file, etc).
+ args = ''
+ if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
+ args = ', '.join('%s=%s' % a for a in task.args.items())
+ args = ' %s' % args
+
+ self.banner(u"TASK [%s%s]" % (task.get_name().strip(), args))
+ if self._display.verbosity >= 2:
+ path = task.get_path()
+ if path:
+ self._display.display(u"task path: %s" % path, color=C.COLOR_DEBUG, log_only=True)
+
+ self._last_task_banner = task._uuid
+
def v2_playbook_on_start(self, playbook):
"""This is basically the start of it all"""
self.plays_count = len(playbook.get_plays())
@@ -236,6 +265,60 @@ The only thing we change here is adding `log_only=True` to the
"""
self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP, log_only=True)
+ ######################################################################
+ # So we can bubble up errors to the top
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ """I guess this is when an entire task has failed?"""
+
+ if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ if 'exception' in result._result:
+ if self._display.verbosity < 3:
+ # extract just the actual error message from the exception text
+ error = result._result['exception'].strip().split('\n')[-1]
+ msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
+ else:
+ msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
+
+ self._display.display(msg, color=C.COLOR_ERROR)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+
+ else:
+ if delegated_vars:
+ self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR)
+ else:
+ self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR)
+
+ if ignore_errors:
+ self._display.display("...ignoring", color=C.COLOR_SKIP)
+
+ def v2_runner_item_on_failed(self, result):
+ """When an item in a task fails."""
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ if 'exception' in result._result:
+ if self._display.verbosity < 3:
+ # extract just the actual error message from the exception text
+ error = result._result['exception'].strip().split('\n')[-1]
+ msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
+ else:
+ msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
+
+ self._display.display(msg, color=C.COLOR_ERROR)
+
+ msg = "failed: "
+ if delegated_vars:
+ msg += "[%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
+ else:
+ msg += "[%s]" % (result._host.get_name())
+
+ self._display.display(msg + " (item=%s) => %s" % (self._get_item(result._result), self._dump_results(result._result)), color=C.COLOR_ERROR)
+ self._handle_warnings(result._result)
+
+ ######################################################################
def v2_playbook_on_stats(self, stats):
"""Print the final playbook run stats"""
self._display.display("", screen_only=True)
diff --git a/examples/README.md b/examples/README.md
new file mode 100644
index 000000000..0e412244d
--- /dev/null
+++ b/examples/README.md
@@ -0,0 +1,93 @@
+# openshift-ansible usage examples
+
+The primary use of `openshift-ansible` is to install, configure and upgrade OpenShift clusters.
+
+This is typically done by direct invocation of Ansible tools like `ansible-playbook`. This use case is covered in detail in the [OpenShift advanced installation documentation](https://docs.openshift.org/latest/install_config/install/advanced_install.html)
+
+For OpenShift Container Platform there's also an installation utility that wraps `openshift-ansible`. This usage case is covered in the [Quick Installation](https://docs.openshift.com/container-platform/latest/install_config/install/quick_install.html) section of the documentation.
+
+The usage examples below cover use cases other than install/configure/upgrade.
+
+## Container image
+
+The examples below run [openshift-ansible in a container](../README_CONTAINER_IMAGE.md) to perform certificate expiration checks on an OpenShift cluster from pods running on the cluster itself.
+
+You can find more details about the certificate expiration check roles and example playbooks in [the openshift_certificate_expiry role's README](../roles/openshift_certificate_expiry/README.md).
+
+### Job to upload certificate expiration reports
+
+The example `Job` in [certificate-check-upload.yaml](certificate-check-upload.yaml) executes a [Job](https://docs.openshift.org/latest/dev_guide/jobs.html) that checks the expiration dates of the internal certificates of the cluster and uploads HTML and JSON reports to `/etc/origin/certificate_expiration_report` in the masters.
+
+This example uses the [`easy-mode-upload.yaml`](../playbooks/certificate_expiry/easy-mode-upload.yaml) example playbook, which generates reports and uploads them to the masters. The playbook can be customized via environment variables to control the length of the warning period (`CERT_EXPIRY_WARN_DAYS`) and the location in the masters where the reports are uploaded (`COPY_TO_PATH`).
+
+The job expects the inventory to be provided via the *hosts* key of a [ConfigMap](https://docs.openshift.org/latest/dev_guide/configmaps.html) named *inventory*, and the passwordless ssh key that allows connecting to the hosts to be availalbe as *ssh-privatekey* from a [Secret](https://docs.openshift.org/latest/dev_guide/secrets.html) named *sshkey*, so these are created first:
+
+ oc new-project certcheck
+ oc create configmap inventory --from-file=hosts=/etc/ansible/hosts
+ oc secrets new-sshauth sshkey --ssh-privatekey=$HOME/.ssh/id_rsa
+
+Note that `inventory`, `hosts`, `sshkey` and `ssh-privatekey` are referenced by name from the provided example Job definition. If you use different names for the objects/attributes you will have to adjust the Job accordingly.
+
+To create the Job:
+
+ oc create -f examples/certificate-check-upload.yaml
+
+### Scheduled job for certificate expiration report upload
+
+**Note**: This example uses the [ScheduledJob](https://docs.openshift.com/container-platform/3.4/dev_guide/scheduled_jobs.html) object, which has been renamed to [CronJob](https://docs.openshift.org/latest/dev_guide/cron_jobs.html) upstream and is still a Technology Preview subject to further change.
+
+The example `ScheduledJob` in [scheduled-certcheck-upload.yaml](scheduled-certcheck-upload.yaml) does the same as the `Job` example above, but it is scheduled to automatically run every first day of the month (see the `spec.schedule` value in the example).
+
+The job definition is the same and it expects the same configuration: we provide the inventory and ssh key via a ConfigMap and a Secret respectively:
+
+ oc new-project certcheck
+ oc create configmap inventory --from-file=hosts=/etc/ansible/hosts
+ oc secrets new-sshauth sshkey --ssh-privatekey=$HOME/.ssh/id_rsa
+
+And then we create the ScheduledJob:
+
+ oc create -f examples/scheduled-certcheck-upload.yaml
+
+### Job and ScheduledJob to check certificates using volumes
+
+There are two additional examples:
+
+ - A `Job` [certificate-check-volume.yaml](certificate-check-volume.yaml)
+ - A `ScheduledJob` [scheduled-certcheck-upload.yaml](scheduled-certcheck-upload.yaml)
+
+These perform the same work as the two examples above, but instead of uploading the generated reports to the masters they store them in a custom path within the container that is expected to be backed by a [PersistentVolumeClaim](https://docs.openshift.org/latest/dev_guide/persistent_volumes.html), so that the reports are actually written to storage external to the container.
+
+These examples assume that there is an existing `PersistentVolumeClaim` called `certcheck-reports` and they use the [`html_and_json_timestamp.yaml`](../playbooks/certificate_expiry/html_and_json_timestamp.yaml) example playbook to write timestamped reports into it.
+
+You can later access the reports from another pod that mounts the same volume, or externally via direct access to the backend storage behind the matching `PersistentVolume`.
+
+To run these examples we prepare the inventory and ssh keys as in the other examples:
+
+ oc new-project certcheck
+ oc create configmap inventory --from-file=hosts=/etc/ansible/hosts
+ oc secrets new-sshauth sshkey --ssh-privatekey=$HOME/.ssh/id_rsa
+
+Additionally we allocate a `PersistentVolumeClaim` to store the reports:
+
+ oc create -f - <<PVC
+ ---
+ apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: certcheck-reports
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+ PVC
+
+With that we can run the `Job` once:
+
+ oc create -f examples/certificate-check-volume.yaml
+
+or schedule it to run periodically as a `ScheduledJob`:
+
+ oc create -f examples/scheduled-certcheck-volume.yaml
+
diff --git a/examples/certificate-check-upload.yaml b/examples/certificate-check-upload.yaml
new file mode 100644
index 000000000..b10a0b614
--- /dev/null
+++ b/examples/certificate-check-upload.yaml
@@ -0,0 +1,47 @@
+# An example Job to run a certificate check of OpenShift's internal
+# certificate status from within OpenShift.
+#
+# The generated reports are uploaded to a location in the master
+# hosts, using the playbook 'easy-mode-upload.yaml'.
+#
+# This example uses the openshift/openshift-ansible container image.
+# (see README_CONTAINER_IMAGE.md in the top level dir for more details).
+#
+# The following objects are xpected to be configured before the creation
+# of this Job:
+# - A ConfigMap named 'inventory' with a key named 'hosts' that
+# contains the the Ansible inventory file
+# - A Secret named 'sshkey' with a key named 'ssh-privatekey
+# that contains the ssh key to connect to the hosts
+# (see examples/README.md for more details)
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: certificate-check
+spec:
+ containers:
+ - name: openshift-ansible
+ image: openshift/openshift-ansible
+ env:
+ - name: PLAYBOOK_FILE
+ value: playbooks/certificate_expiry/easy-mode-upload.yaml
+ - name: INVENTORY_FILE
+ value: /tmp/inventory/hosts # from configmap vol below
+ - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below
+ value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey
+ - name: CERT_EXPIRY_WARN_DAYS
+ value: "45" # must be a string, don't forget the quotes
+ volumeMounts:
+ - name: sshkey
+ mountPath: /opt/app-root/src/.ssh/id_rsa
+ - name: inventory
+ mountPath: /tmp/inventory
+ volumes:
+ - name: sshkey
+ secret:
+ secretName: sshkey
+ - name: inventory
+ configMap:
+ name: inventory
+ restartPolicy: Never
diff --git a/examples/certificate-check-volume.yaml b/examples/certificate-check-volume.yaml
new file mode 100644
index 000000000..c19dc1f88
--- /dev/null
+++ b/examples/certificate-check-volume.yaml
@@ -0,0 +1,54 @@
+# An example Job to run a certificate check of OpenShift's internal
+# certificate status from within OpenShift.
+#
+# The generated reports are stored in a Persistent Volume using
+# the playbook 'html_and_json_timestamp.yaml'.
+#
+# This example uses the openshift/openshift-ansible container image.
+# (see README_CONTAINER_IMAGE.md in the top level dir for more details).
+#
+# The following objects are xpected to be configured before the creation
+# of this Job:
+# - A ConfigMap named 'inventory' with a key named 'hosts' that
+# contains the the Ansible inventory file
+# - A Secret named 'sshkey' with a key named 'ssh-privatekey
+# that contains the ssh key to connect to the hosts
+# - A PersistentVolumeClaim named 'certcheck-reports' where the
+# generated reports are going to be stored
+# (see examples/README.md for more details)
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: certificate-check
+spec:
+ containers:
+ - name: openshift-ansible
+ image: openshift/openshift-ansible
+ env:
+ - name: PLAYBOOK_FILE
+ value: playbooks/certificate_expiry/html_and_json_timestamp.yaml
+ - name: INVENTORY_FILE
+ value: /tmp/inventory/hosts # from configmap vol below
+ - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below
+ value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey
+ - name: CERT_EXPIRY_WARN_DAYS
+ value: "45" # must be a string, don't forget the quotes
+ volumeMounts:
+ - name: sshkey
+ mountPath: /opt/app-root/src/.ssh/id_rsa
+ - name: inventory
+ mountPath: /tmp/inventory
+ - name: reports
+ mountPath: /var/lib/certcheck
+ volumes:
+ - name: sshkey
+ secret:
+ secretName: sshkey
+ - name: inventory
+ configMap:
+ name: inventory
+ - name: reports
+ persistentVolumeClaim:
+ claimName: certcheck-reports
+ restartPolicy: Never
diff --git a/examples/scheduled-certcheck-upload.yaml b/examples/scheduled-certcheck-upload.yaml
new file mode 100644
index 000000000..b0a97361b
--- /dev/null
+++ b/examples/scheduled-certcheck-upload.yaml
@@ -0,0 +1,53 @@
+# An example ScheduledJob to run a regular check of OpenShift's internal
+# certificate status.
+#
+# Each job will upload new reports to a directory in the master hosts
+#
+# The Job specification is the same as 'certificate-check-upload.yaml'
+# and the expected pre-configuration is equivalent.
+# See that Job example and examples/README.md for more details.
+#
+# NOTE: ScheduledJob has been renamed to CronJob in upstream k8s recently. At
+# some point (OpenShift 3.6+) this will have to be renamed to "kind: CronJob"
+# and once the API stabilizes the apiVersion will have to be updated too.
+---
+apiVersion: batch/v2alpha1
+kind: ScheduledJob
+metadata:
+ name: certificate-check
+ labels:
+ app: certcheck
+spec:
+ schedule: "0 0 1 * *" # every 1st day of the month at midnight
+ jobTemplate:
+ metadata:
+ labels:
+ app: certcheck
+ spec:
+ template:
+ spec:
+ containers:
+ - name: openshift-ansible
+ image: openshift/openshift-ansible
+ env:
+ - name: PLAYBOOK_FILE
+ value: playbooks/certificate_expiry/easy-mode-upload.yaml
+ - name: INVENTORY_FILE
+ value: /tmp/inventory/hosts # from configmap vol below
+ - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below
+ value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey
+ - name: CERT_EXPIRY_WARN_DAYS
+ value: "45" # must be a string, don't forget the quotes
+ volumeMounts:
+ - name: sshkey
+ mountPath: /opt/app-root/src/.ssh/id_rsa
+ - name: inventory
+ mountPath: /tmp/inventory
+ volumes:
+ - name: sshkey
+ secret:
+ secretName: sshkey
+ - name: inventory
+ configMap:
+ name: inventory
+ restartPolicy: Never
diff --git a/examples/scheduled-certcheck-volume.yaml b/examples/scheduled-certcheck-volume.yaml
new file mode 100644
index 000000000..74cdc9e7f
--- /dev/null
+++ b/examples/scheduled-certcheck-volume.yaml
@@ -0,0 +1,58 @@
+# An example ScheduledJob to run a regular check of OpenShift's internal
+# certificate status.
+#
+# Each job will add a new pair of reports to the configured Persistent Volume
+#
+# The Job specification is the same as 'certificate-check-volume.yaml'
+# and the expected pre-configuration is equivalent.
+# See that Job example and examples/README.md for more details.
+#
+# NOTE: ScheduledJob has been renamed to CronJob in upstream k8s recently. At
+# some point (OpenShift 3.6+) this will have to be renamed to "kind: CronJob"
+# and once the API stabilizes the apiVersion will have to be updated too.
+---
+apiVersion: batch/v2alpha1
+kind: ScheduledJob
+metadata:
+ name: certificate-check
+ labels:
+ app: certcheck
+spec:
+ schedule: "0 0 1 * *" # every 1st day of the month at midnight
+ jobTemplate:
+ metadata:
+ labels:
+ app: certcheck
+ spec:
+ template:
+ spec:
+ containers:
+ - name: openshift-ansible
+ image: openshift/openshift-ansible
+ env:
+ - name: PLAYBOOK_FILE
+ value: playbooks/certificate_expiry/html_and_json_timestamp.yaml
+ - name: INVENTORY_FILE
+ value: /tmp/inventory/hosts # from configmap vol below
+ - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below
+ value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey
+ - name: CERT_EXPIRY_WARN_DAYS
+ value: "45" # must be a string, don't forget the quotes
+ volumeMounts:
+ - name: sshkey
+ mountPath: /opt/app-root/src/.ssh/id_rsa
+ - name: inventory
+ mountPath: /tmp/inventory
+ - name: reports
+ mountPath: /var/lib/certcheck
+ volumes:
+ - name: sshkey
+ secret:
+ secretName: sshkey
+ - name: inventory
+ configMap:
+ name: inventory
+ - name: reports
+ persistentVolumeClaim:
+ claimName: certcheck-reports
+ restartPolicy: Never
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index ed6923687..b550bd16a 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -1,26 +1,36 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
-# pylint: disable=no-name-in-module, import-error, wrong-import-order, ungrouped-imports
+# pylint: disable=too-many-lines
"""
Custom filters for use in openshift-ansible
"""
+import json
import os
import pdb
-import pkg_resources
-import re
-import json
-import yaml
import random
+import re
-from ansible import errors
from collections import Mapping
-from distutils.util import strtobool
-from distutils.version import LooseVersion
+# pylint no-name-in-module and import-error disabled here because pylint
+# fails to properly detect the packages when installed in a virtualenv
+from distutils.util import strtobool # pylint:disable=no-name-in-module,import-error
+from distutils.version import LooseVersion # pylint:disable=no-name-in-module,import-error
from operator import itemgetter
-from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+import pkg_resources
+import yaml
+
+from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
-from six import string_types
+
+# ansible.compat.six goes away with Ansible 2.4
+try:
+ from ansible.compat.six import string_types, u
+ from ansible.compat.six.moves.urllib.parse import urlparse
+except ImportError:
+ from ansible.module_utils.six import string_types, u
+ from ansible.module_utils.six.moves.urllib.parse import urlparse
HAS_OPENSSL = False
try:
@@ -29,15 +39,6 @@ try:
except ImportError:
pass
-try:
- # ansible-2.2
- # ansible.utils.unicode.to_unicode is deprecated in ansible-2.2,
- # ansible.module_utils._text.to_text should be used instead.
- from ansible.module_utils._text import to_text
-except ImportError:
- # ansible-2.1
- from ansible.utils.unicode import to_unicode as to_text
-
def oo_pdb(arg):
""" This pops you into a pdb instance where arg is the data passed in
@@ -117,8 +118,7 @@ def oo_merge_hostvars(hostvars, variables, inventory_hostname):
raise errors.AnsibleFilterError("|failed expects variables is a dictionary")
if not isinstance(inventory_hostname, string_types):
raise errors.AnsibleFilterError("|failed expects inventory_hostname is a string")
- # pylint: disable=no-member
- ansible_version = pkg_resources.get_distribution("ansible").version
+ ansible_version = pkg_resources.get_distribution("ansible").version # pylint: disable=maybe-no-member
merged_hostvars = {}
if LooseVersion(ansible_version) >= LooseVersion('2.0.0'):
merged_hostvars = oo_merge_dicts(
@@ -129,34 +129,57 @@ def oo_merge_hostvars(hostvars, variables, inventory_hostname):
return merged_hostvars
-def oo_collect(data, attribute=None, filters=None):
+def oo_collect(data_list, attribute=None, filters=None):
""" This takes a list of dict and collects all attributes specified into a
list. If filter is specified then we will include all items that
match _ALL_ of filters. If a dict entry is missing the key in a
filter it will be excluded from the match.
- Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
- {'a':2, 'z': 'z'}, # True, return
- {'a':3, 'z': 'z'}, # True, return
- {'a':4, 'z': 'b'}, # FAILED, obj['z'] != obj['z']
- ]
+ Ex: data_list = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
+ {'a':2, 'z': 'z'}, # True, return
+ {'a':3, 'z': 'z'}, # True, return
+ {'a':4, 'z': 'b'}, # FAILED, obj['z'] != obj['z']
+ ]
attribute = 'a'
filters = {'z': 'z'}
returns [1, 2, 3]
+
+ This also deals with lists of lists with dict as elements.
+ Ex: data_list = [
+ [ {'a':1, 'b':5, 'z': 'z'}, # True, return
+ {'a':2, 'b':6, 'z': 'z'} # True, return
+ ],
+ [ {'a':3, 'z': 'z'}, # True, return
+ {'a':4, 'z': 'b'} # FAILED, obj['z'] != obj['z']
+ ],
+ {'a':5, 'z': 'z'}, # True, return
+ ]
+ attribute = 'a'
+ filters = {'z': 'z'}
+ returns [1, 2, 3, 5]
"""
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects to filter on a List")
+ if not isinstance(data_list, list):
+ raise errors.AnsibleFilterError("oo_collect expects to filter on a List")
if not attribute:
- raise errors.AnsibleFilterError("|failed expects attribute to be set")
+ raise errors.AnsibleFilterError("oo_collect expects attribute to be set")
+
+ data = []
+ retval = []
+
+ for item in data_list:
+ if isinstance(item, list):
+ retval.extend(oo_collect(item, attribute, filters))
+ else:
+ data.append(item)
if filters is not None:
if not isinstance(filters, dict):
- raise errors.AnsibleFilterError("|failed expects filter to be a"
- " dict")
- retval = [get_attr(d, attribute) for d in data if (
- all([d.get(key, None) == filters[key] for key in filters]))]
+ raise errors.AnsibleFilterError(
+ "oo_collect expects filter to be a dict")
+ retval.extend([get_attr(d, attribute) for d in data if (
+ all([d.get(key, None) == filters[key] for key in filters]))])
else:
- retval = [get_attr(d, attribute) for d in data]
+ retval.extend([get_attr(d, attribute) for d in data])
retval = [val for val in retval if val is not None]
@@ -659,11 +682,11 @@ def to_padded_yaml(data, level=0, indent=2, **kw):
return ""
try:
- transformed = yaml.dump(data, indent=indent, allow_unicode=True,
- default_flow_style=False,
- Dumper=AnsibleDumper, **kw)
+ transformed = u(yaml.dump(data, indent=indent, allow_unicode=True,
+ default_flow_style=False,
+ Dumper=AnsibleDumper, **kw))
padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
- return to_text("\n{0}".format(padded))
+ return "\n{0}".format(padded)
except Exception as my_e:
raise errors.AnsibleFilterError('Failed to convert: %s' % my_e)
diff --git a/filter_plugins/openshift_version.py b/filter_plugins/openshift_version.py
new file mode 100644
index 000000000..1403e9dcc
--- /dev/null
+++ b/filter_plugins/openshift_version.py
@@ -0,0 +1,129 @@
+#!/usr/bin/python
+
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+"""
+Custom version comparison filters for use in openshift-ansible
+"""
+
+# pylint can't locate distutils.version within virtualenv
+# https://github.com/PyCQA/pylint/issues/73
+# pylint: disable=no-name-in-module, import-error
+from distutils.version import LooseVersion
+
+
+def legacy_gte_function_builder(name, versions):
+ """
+ Build and return a version comparison function.
+
+ Ex: name = 'oo_version_gte_3_1_or_1_1'
+ versions = {'enterprise': '3.1', 'origin': '1.1'}
+
+ returns oo_version_gte_3_1_or_1_1, a function which based on the
+ version and deployment type will return true if the provided
+ version is greater than or equal to the function's version
+ """
+ enterprise_version = versions['enterprise']
+ origin_version = versions['origin']
+
+ def _gte_function(version, deployment_type):
+ """
+ Dynamic function created by gte_function_builder.
+
+ Ex: version = '3.1'
+ deployment_type = 'openshift-enterprise'
+ returns True/False
+ """
+ version_gte = False
+ if 'enterprise' in deployment_type:
+ if str(version) >= LooseVersion(enterprise_version):
+ version_gte = True
+ elif 'origin' in deployment_type:
+ if str(version) >= LooseVersion(origin_version):
+ version_gte = True
+ return version_gte
+ _gte_function.__name__ = name
+ return _gte_function
+
+
+def gte_function_builder(name, gte_version):
+ """
+ Build and return a version comparison function.
+
+ Ex: name = 'oo_version_gte_3_6'
+ version = '3.6'
+
+ returns oo_version_gte_3_6, a function which based on the
+ version will return true if the provided version is greater
+ than or equal to the function's version
+ """
+ def _gte_function(version):
+ """
+ Dynamic function created by gte_function_builder.
+
+ Ex: version = '3.1'
+ returns True/False
+ """
+ version_gte = False
+ if str(version) >= LooseVersion(gte_version):
+ version_gte = True
+ return version_gte
+ _gte_function.__name__ = name
+ return _gte_function
+
+
+# pylint: disable=too-few-public-methods
+class FilterModule(object):
+ """
+ Filters for version checking.
+ """
+ # Each element of versions is composed of (major, minor_start, minor_end)
+ # Origin began versioning 3.x with 3.6, so begin 3.x with 3.6.
+ versions = [(3, 6, 10)]
+
+ def __init__(self):
+ """
+ Creates a new FilterModule for ose version checking.
+ """
+ self._filters = {}
+
+ # For each set of (major, minor, minor_iterations)
+ for major, minor_start, minor_end in self.versions:
+ # For each minor version in the range
+ for minor in range(minor_start, minor_end):
+ # Create the function name
+ func_name = 'oo_version_gte_{}_{}'.format(major, minor)
+ # Create the function with the builder
+ func = gte_function_builder(func_name, "{}.{}.0".format(major, minor))
+ # Add the function to the mapping
+ self._filters[func_name] = func
+
+ # Create filters with special versioning requirements.
+ # Treat all Origin 1.x as special case.
+ legacy_filters = [{'name': 'oo_version_gte_3_1_or_1_1',
+ 'versions': {'enterprise': '3.0.2.905',
+ 'origin': '1.1.0'}},
+ {'name': 'oo_version_gte_3_1_1_or_1_1_1',
+ 'versions': {'enterprise': '3.1.1',
+ 'origin': '1.1.1'}},
+ {'name': 'oo_version_gte_3_2_or_1_2',
+ 'versions': {'enterprise': '3.1.1.901',
+ 'origin': '1.2.0'}},
+ {'name': 'oo_version_gte_3_3_or_1_3',
+ 'versions': {'enterprise': '3.3.0',
+ 'origin': '1.3.0'}},
+ {'name': 'oo_version_gte_3_4_or_1_4',
+ 'versions': {'enterprise': '3.4.0',
+ 'origin': '1.4.0'}},
+ {'name': 'oo_version_gte_3_5_or_1_5',
+ 'versions': {'enterprise': '3.5.0',
+ 'origin': '1.5.0'}}]
+ for legacy_filter in legacy_filters:
+ self._filters[legacy_filter['name']] = legacy_gte_function_builder(legacy_filter['name'],
+ legacy_filter['versions'])
+
+ def filters(self):
+ """
+ Return the filters mapping.
+ """
+ return self._filters
diff --git a/inventory/byo/hosts.openstack b/inventory/byo/hosts.openstack
index ea7e905cb..c648078c4 100644
--- a/inventory/byo/hosts.openstack
+++ b/inventory/byo/hosts.openstack
@@ -15,7 +15,7 @@ ansible_become=yes
# Debug level for all OpenShift components (Defaults to 2)
debug_level=2
-deployment_type=openshift-enterprise
+openshift_deployment_type=openshift-enterprise
openshift_additional_repos=[{'id': 'ose-3.1', 'name': 'ose-3.1', 'baseurl': 'http://pulp.dist.prod.ext.phx2.redhat.com/content/dist/rhel/server/7/7Server/x86_64/ose/3.1/os', 'enabled': 1, 'gpgcheck': 0}]
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 6dec97fda..27914e60a 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -23,24 +23,24 @@ ansible_ssh_user=root
debug_level=2
# deployment type valid values are origin, online, atomic-enterprise and openshift-enterprise
-deployment_type=origin
+openshift_deployment_type=origin
# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
# rely on the version running on the first master. Works best for containerized installs where we can usually
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
-openshift_release=v1.4
+openshift_release=v1.5
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v1.2.0
+#openshift_image_tag=v1.5.0
# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-1.2.0
+#openshift_pkg_version=-1.5.0
# Install the openshift examples
#openshift_install_examples=true
@@ -91,6 +91,10 @@ openshift_release=v1.4
# Specify exact version of etcd to configure or upgrade to.
# etcd_version="3.1.0"
+# Enable etcd debug logging, defaults to false
+# etcd_debug=true
+# Set etcd log levels by package
+# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG"
# Upgrade Hooks
#
@@ -265,6 +269,15 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Override master servingInfo.maxRequestsInFlight
#openshift_master_max_requests_inflight=500
+# Override master and node servingInfo.minTLSVersion and .cipherSuites
+# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12
+# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants
+#openshift_master_min_tls_version=VersionTLS12
+#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
+#
+#openshift_node_min_tls_version=VersionTLS12
+#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
+
# default storage plugin dependencies to install, by default the ceph and
# glusterfs plugin dependencies will be installed, if available.
#osn_storage_plugin_deps=['ceph','glusterfs','iscsi']
@@ -375,6 +388,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# based on the number of nodes matching the openshift registry selector.
#openshift_hosted_registry_replicas=2
#
+# Validity of the auto-generated certificate in days (optional)
+#openshift_hosted_registry_cert_expire_days=730
+#
# Disable management of the OpenShift Registry
#openshift_hosted_manage_registry=false
@@ -485,6 +501,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# your cloud platform use this.
#openshift_hosted_metrics_storage_kind=dynamic
#
+# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
+# list of options please see roles/openshift_metrics/README.md
+#
# Override metricsPublicURL in the master config for cluster metrics
# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
# Currently, you may only alter the hostname portion of the url, alterting the
@@ -530,15 +549,14 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# list of options please see roles/openshift_logging/README.md
#
# Configure loggingPublicURL in the master config for aggregate logging, defaults
-# to https://kibana.{{ openshift_master_default_subdomain }}
-#openshift_master_logging_public_url=https://kibana.example.com
+# to kibana.{{ openshift_master_default_subdomain }}
+#openshift_hosted_logging_hostname=logging.apps.example.com
# Configure the number of elastic search nodes, unless you're using dynamic provisioning
# this value must be 1
#openshift_hosted_logging_elasticsearch_cluster_size=1
-#openshift_hosted_logging_hostname=logging.apps.example.com
-# Configure the prefix and version for the deployer image
-#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
-#openshift_hosted_logging_deployer_version=3.3.0
+# Configure the prefix and version for the component images
+#openshift_hosted_logging_deployer_prefix=docker.io/openshift/origin-
+#openshift_hosted_logging_deployer_version=1.5.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -633,7 +651,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Configure nodeIP in the node config
# This is needed in cases where node traffic is desired to go over an
# interface other than the default network interface.
-#openshift_node_set_node_ip=True
+#openshift_set_node_ip=True
# Force setting of system hostname when configuring OpenShift
# This works around issues related to installations that do not have valid dns
@@ -739,6 +757,18 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# by deployment_type=origin
#openshift_enable_origin_repo=false
+# Validity of the auto-generated OpenShift certificates in days.
+# See also openshift_hosted_registry_cert_expire_days above.
+#
+#openshift_ca_cert_expire_days=1825
+#openshift_node_cert_expire_days=730
+#openshift_master_cert_expire_days=730
+
+# Validity of the auto-generated external etcd certificates in days.
+# Controls validity for etcd CA, peer, server and client certificates.
+#
+#etcd_ca_default_days=1825
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 2b61e7d8d..f0269bff8 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -23,24 +23,24 @@ ansible_ssh_user=root
debug_level=2
# deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise
-deployment_type=openshift-enterprise
+openshift_deployment_type=openshift-enterprise
# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
# rely on the version running on the first master. Works best for containerized installs where we can usually
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
-openshift_release=v3.4
+openshift_release=v3.5
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v3.2.0.46
+#openshift_image_tag=v3.5.0
# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-3.2.0.46
+#openshift_pkg_version=-3.5.0
# Install the openshift examples
#openshift_install_examples=true
@@ -91,6 +91,10 @@ openshift_release=v3.4
# Specify exact version of etcd to configure or upgrade to.
# etcd_version="3.1.0"
+# Enable etcd debug logging, defaults to false
+# etcd_debug=true
+# Set etcd log levels by package
+# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG"
# Upgrade Hooks
#
@@ -265,6 +269,15 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Override master servingInfo.maxRequestsInFlight
#openshift_master_max_requests_inflight=500
+# Override master and node servingInfo.minTLSVersion and .cipherSuites
+# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12
+# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants
+#openshift_master_min_tls_version=VersionTLS12
+#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
+#
+#openshift_node_min_tls_version=VersionTLS12
+#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
+
# default storage plugin dependencies to install, by default the ceph and
# glusterfs plugin dependencies will be installed, if available.
#osn_storage_plugin_deps=['ceph','glusterfs']
@@ -375,6 +388,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# based on the number of nodes matching the openshift registry selector.
#openshift_hosted_registry_replicas=2
#
+# Validity of the auto-generated certificate in days (optional)
+#openshift_hosted_registry_cert_expire_days=730
+#
# Disable management of the OpenShift Registry
#openshift_hosted_manage_registry=false
@@ -486,6 +502,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# your cloud platform use this.
#openshift_hosted_metrics_storage_kind=dynamic
#
+# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
+# list of options please see roles/openshift_metrics/README.md
+#
# Override metricsPublicURL in the master config for cluster metrics
# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
# Currently, you may only alter the hostname portion of the url, alterting the
@@ -531,15 +550,14 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# list of options please see roles/openshift_logging/README.md
#
# Configure loggingPublicURL in the master config for aggregate logging, defaults
-# to https://kibana.{{ openshift_master_default_subdomain }}
-#openshift_master_logging_public_url=https://kibana.example.com
+# to kibana.{{ openshift_master_default_subdomain }}
+#openshift_hosted_logging_hostname=logging.apps.example.com
# Configure the number of elastic search nodes, unless you're using dynamic provisioning
# this value must be 1
#openshift_hosted_logging_elasticsearch_cluster_size=1
-#openshift_hosted_logging_hostname=logging.apps.example.com
-# Configure the prefix and version for the deployer image
+# Configure the prefix and version for the component images
#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
-#openshift_hosted_logging_deployer_version=3.3.0
+#openshift_hosted_logging_deployer_version=3.5.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -634,7 +652,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Configure nodeIP in the node config
# This is needed in cases where node traffic is desired to go over an
# interface other than the default network interface.
-#openshift_node_set_node_ip=True
+#openshift_set_node_ip=True
# Force setting of system hostname when configuring OpenShift
# This works around issues related to installations that do not have valid dns
@@ -736,6 +754,18 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Enable API service auditing, available as of 3.2
#openshift_master_audit_config={"basicAuditEnabled": true}
+# Validity of the auto-generated OpenShift certificates in days.
+# See also openshift_hosted_registry_cert_expire_days above.
+#
+#openshift_ca_cert_expire_days=1825
+#openshift_node_cert_expire_days=730
+#openshift_master_cert_expire_days=730
+
+# Validity of the auto-generated external etcd certificates in days.
+# Controls validity for etcd CA, peer, server and client certificates.
+#
+#etcd_ca_default_days=1825
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index c7b4544be..4a69537fe 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -9,7 +9,7 @@
%global __requires_exclude ^/usr/bin/ansible-playbook$
Name: openshift-ansible
-Version: 3.5.3
+Version: 3.6.21
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -21,7 +21,7 @@ Requires: ansible >= 2.2.0.0-1
Requires: python2
Requires: python-six
Requires: tar
-Requires: openshift-ansible-docs = %{version}-%{release}
+Requires: openshift-ansible-docs = %{version}
Requires: java-1.8.0-openjdk-headless
Requires: httpd-tools
Requires: libselinux-python
@@ -250,7 +250,7 @@ BuildArch: noarch
%package -n atomic-openshift-utils
Summary: Atomic OpenShift Utilities
BuildRequires: python-setuptools
-Requires: %{name}-playbooks >= %{version}
+Requires: %{name}-playbooks = %{version}
Requires: python-click
Requires: python-setuptools
Requires: PyYAML
@@ -270,6 +270,748 @@ Atomic OpenShift Utilities includes
%changelog
+* Tue Apr 11 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.21-1
+- Adding a query for the existing docker-registry route. (kwoodson@redhat.com)
+- Removing docker-registry route from cockpit-ui. (kwoodson@redhat.com)
+
+* Fri Apr 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.20-1
+- Fixed a bug when oc command fails. (kwoodson@redhat.com)
+- openshift_sanitize_inventory: validate release (lmeyer@redhat.com)
+
+* Fri Apr 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.19-1
+- Add example scheduled certificate check (pep@redhat.com)
+- Switch from ignoring to passing on checks (rteague@redhat.com)
+- Add tests for action plugin (rhcarvalho@gmail.com)
+- Remove unnecessary code (rhcarvalho@gmail.com)
+- Make resolve_checks more strict (rhcarvalho@gmail.com)
+
+* Fri Apr 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.18-1
+- master-api: add mount for /var/log (gscrivan@redhat.com)
+- master: add mount for /var/log (gscrivan@redhat.com)
+- unexclude excluder if it is to be upgraded and already installed
+ (jchaloup@redhat.com)
+- Bump calico policy controller (djosborne10@gmail.com)
+- Fixed a string format and a lint space issue (kwoodson@redhat.com)
+- Fixed name and selector to be mutually exclusive (kwoodson@redhat.com)
+- Adding ability to delete by selector. (kwoodson@redhat.com)
+- Adding delete with selector support. (kwoodson@redhat.com)
+
+* Thu Apr 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.17-1
+- Adding signed router cert and fixing server_cert bug. (kwoodson@redhat.com)
+
+* Wed Apr 05 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.16-1
+- Removing test coverage for shared code. (kwoodson@redhat.com)
+- Port 10255 unnecessary. Removing all instances (ccallega@redhat.com)
+- oo_filters: Disable pylint too-many-lines test (jarrpa@redhat.com)
+- oo_collect: Allow list elements to be lists of dict (jarrpa@redhat.com)
+- oc_label: handle case where _get() returns no results (jarrpa@redhat.com)
+- Addressing py27-yamllint (esauer@redhat.com)
+- Add 'docker-registry.default.svc' to cert-redeploy too (sdodson@redhat.com)
+- Support unicode output when dumping yaml (rteague@redhat.com)
+- Add docker-registry.default.svc short name to registry service signing
+ (sdodson@redhat.com)
+- oc_configmap: Add missing check for name (jarrpa@redhat.com)
+- oo_collect: Update comments to show source of failure (jarrpa@redhat.com)
+- openshift_facts: Allow examples_content_version to be set to v1.6
+ (jarrpa@redhat.com)
+- Restart polkitd to workaround a bug in polkitd (sdodson@redhat.com)
+- Add names to openshift_image_tag asserts (smilner@redhat.com)
+- doc: Remove atomic-openshift deployment type (smilner@redhat.com)
+- openshift_version now requires prepended version formats (smilner@redhat.com)
+- Warn if openshift_image_tag is defined by hand for package installs
+ (smilner@redhat.com)
+- Verify openshift_image_tag is valid during openshift_version main
+ (smilner@redhat.com)
+- Add openshift_version fact fallback debug messages (smilner@redhat.com)
+- cleanup: when in openshift_version tasks are multiline (smilner@redhat.com)
+- Compatibility updates to openshift_logging role for ansible 2.2.2.0+
+ (esauer@redhat.com)
+
+* Tue Apr 04 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.15-1
+- Document etcd_ca_default_days in example inventories. (abutcher@redhat.com)
+- Fixed a bug. Ansible requires a msg param when module.fail_json.
+ (kwoodson@redhat.com)
+
+* Sat Apr 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.14-1
+- Update v1.5 content (sdodson@redhat.com)
+- Add v1.6 content (sdodson@redhat.com)
+- Fix generated code (sdodson@redhat.com)
+- bug 1432607. Allow configuration of ES log destination (jcantril@redhat.com)
+- openshift_facts: install python3-dbus package on Fedora nodes.
+ (vsemushi@redhat.com)
+- Remove kube-nfs-volumes role (mawong@redhat.com)
+
+* Fri Mar 31 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.13-1
+- fixed decode switch so it works on OSX (stobias@harborfreight.com)
+- Wait for firewalld polkit policy to be defined (sdodson@redhat.com)
+- Correct copy task to use remote source (rteague@redhat.com)
+- validate and normalize inventory variables (lmeyer@redhat.com)
+- Fixed spacing. (kwoodson@redhat.com)
+- Fixed docs. Fixed add_resource. (kwoodson@redhat.com)
+- Fixing linting for spaces. (kwoodson@redhat.com)
+- Removing initial setting of metrics image prefix and version
+ (ewolinet@redhat.com)
+- Adding clusterrole to the toolbox. (kwoodson@redhat.com)
+- Fixed a bug in oc_volume. (kwoodson@redhat.com)
+- Adding a few more test cases. Fixed a bug when key was empty. Safeguard
+ against yedit module being passed an empty key (kwoodson@redhat.com)
+- Added the ability to do multiple edits (kwoodson@redhat.com)
+- fix es config merge so template does not need quoting. gen then merge
+ (jcantril@redhat.com)
+
+* Thu Mar 30 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.12-1
+- Update example inventory files to mention certificate validity parameters.
+ (vsemushi@redhat.com)
+- openshift_hosted: add openshift_hosted_registry_cert_expire_days parameter.
+ (vsemushi@redhat.com)
+- oc_adm_ca_server_cert.py: re-generate. (vsemushi@redhat.com)
+- oc_adm_ca_server_cert: add expire_days parameter. (vsemushi@redhat.com)
+- openshift_ca: add openshift_ca_cert_expire_days and
+ openshift_master_cert_expire_days parameters. (vsemushi@redhat.com)
+- redeploy-certificates/registry.yml: add
+ openshift_hosted_registry_cert_expire_days parameter. (vsemushi@redhat.com)
+- openshift_master_certificates: add openshift_master_cert_expire_days
+ parameter. (vsemushi@redhat.com)
+- openshift_node_certificates: add openshift_node_cert_expire_days parameter.
+ (vsemushi@redhat.com)
+- Update Dockerfile.rhel7 to reflect changes to Dockerfile (pep@redhat.com)
+
+* Wed Mar 29 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.11-1
+- Add etcd_debug and etcd_log_package_levels variables (sdodson@redhat.com)
+- Make the OCP available version detection excluder free (jchaloup@redhat.com)
+- Add test scaffold for docker_image_availability.py (rhcarvalho@gmail.com)
+- Add unit tests for package_version.py (rhcarvalho@gmail.com)
+- Add unit tests for package_update.py (rhcarvalho@gmail.com)
+- Add unit tests for package_availability.py (rhcarvalho@gmail.com)
+- Add unit tests for mixins.py (rhcarvalho@gmail.com)
+- Test recursively finding subclasses (rhcarvalho@gmail.com)
+- Test OpenShift health check loader (rhcarvalho@gmail.com)
+- Rename module_executor -> execute_module (rhcarvalho@gmail.com)
+- Use oo_version_gte_3_6+ for future versions and treat 1.x origin as legacy.
+ Add tests. (abutcher@redhat.com)
+- Added 3.5 -> 3.6 upgrade playbooks (skuznets@redhat.com)
+- Add oo_version_gte_X_X_or_Y_Y version comparison filters.
+ (abutcher@redhat.com)
+
+* Tue Mar 28 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.10-1
+- Use meta/main.yml for role dependencies (rteague@redhat.com)
+- Upgrade specific rpms instead of just master/node. (dgoodwin@redhat.com)
+- Adding namespace to doc. (kwoodson@redhat.com)
+- Add calico. (djosborne10@gmail.com)
+- Fixing up test cases, linting, and added a return. (kwoodson@redhat.com)
+- first step in ocimage (ihorvath@redhat.com)
+- ocimage (ihorvath@redhat.com)
+- Setting defaults on openshift_hosted. (kwoodson@redhat.com)
+- rebase and regenerate (jdiaz@redhat.com)
+- fix up things flagged by flake8 (jdiaz@redhat.com)
+- clean up and clarify docs/comments (jdiaz@redhat.com)
+- add oc_user ansible module (jdiaz@redhat.com)
+- Fix etcd cert generation (djosborne10@gmail.com)
+
+* Sat Mar 25 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.9-1
+- Found this while searching the metrics role for logging, is this wrong?
+ (sdodson@redhat.com)
+- Fix overriding openshift_{logging,metrics}_image_prefix (sdodson@redhat.com)
+- Make linter happy (sdodson@redhat.com)
+- Specify enterprise defaults for logging and metrics images
+ (sdodson@redhat.com)
+- Update s2i-dotnetcore content (sdodson@redhat.com)
+- Stop all services before upgrading openvswitch (sdodson@redhat.com)
+- Bug 1434300 - Log entries are generated in ES after deployed logging stacks
+ via ansible, but can not be found in kibana. (rmeggins@redhat.com)
+- Adding error checking to the delete. (kwoodson@redhat.com)
+- Updated comment. (kwoodson@redhat.com)
+- Fixed doc. Updated test to change existing key. Updated module spec for
+ required name param. (kwoodson@redhat.com)
+- Adding oc_configmap to lib_openshift. (kwoodson@redhat.com)
+
+* Fri Mar 24 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.8-1
+- vendor patched upstream docker_container module. (jvallejo@redhat.com)
+- add docker_image_availability check (jvallejo@redhat.com)
+- Do not use auto_expand_replicas (lukas.vlcek@gmail.com)
+- Adding tests to increase TC. (kwoodson@redhat.com)
+- Adding a pvc create test case. (kwoodson@redhat.com)
+- Cherry picking from #3711 (ewolinet@redhat.com)
+
+* Thu Mar 23 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.7-1
+- openshift_logging calculate min_masters to fail early on split brain
+ (jcantril@redhat.com)
+- Fixed linting and configmap_name param (kwoodson@redhat.com)
+- Adding configmap support. (kwoodson@redhat.com)
+- Make /rootfs mount rslave (sdodson@redhat.com)
+- Update imageConfig.format on upgrades to match oreg_url (sdodson@redhat.com)
+- Adding configmap support and adding tests. (kwoodson@redhat.com)
+- Adding oc_volume to lib_openshift. (kwoodson@redhat.com)
+- upgrade: restart ovs-vswitchd and ovsdb-server (gscrivan@redhat.com)
+- Make atomic-openshift-utils require playbooks of the same version
+ (sdodson@redhat.com)
+
+* Wed Mar 22 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.6-1
+- Fix copy-pasta docstrings (rhcarvalho@gmail.com)
+- Rename _ns -> node_selector (rhcarvalho@gmail.com)
+- Reindent code (rhcarvalho@gmail.com)
+- Update the failure methods and add required variables/functions
+ (tbielawa@redhat.com)
+- Import the default ansible output callback on_failed methods
+ (tbielawa@redhat.com)
+- Switched Cassandra to use certificates generated by OpenShift
+ (juraci@kroehling.de)
+- Allow user to specify additions to ES config (jcantril@redhat.com)
+
+* Tue Mar 21 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.5-1
+- Attempt to match version of excluders to target version (sdodson@redhat.com)
+- Get rid of adjust.yml (sdodson@redhat.com)
+- Protect against missing commands (sdodson@redhat.com)
+- Simplify excluder enablement logic a bit more (sdodson@redhat.com)
+- Add tito releaser for 3.6 (smunilla@redhat.com)
+- Adding oc_group to lib_openshift (kwoodson@redhat.com)
+- preflight checks: improve user output from checks (lmeyer@redhat.com)
+- preflight checks: bypass RPM excludes (lmeyer@redhat.com)
+- acceptschema2 default: true (aweiteka@redhat.com)
+- Do not require python-six via openshift_facts (rhcarvalho@gmail.com)
+
+* Sat Mar 18 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.4-1
+- Cherry picking from #3689 (ewolinet@redhat.com)
+- Moving projects task within openshift_hosted (rteague@redhat.com)
+- Refactor openshift_projects role (rteague@redhat.com)
+- Add unit tests for existing health checks (rhcarvalho@gmail.com)
+- Do not update when properties when not passed. (kwoodson@redhat.com)
+- change shell to bash in generate_jks.sh (l@lmello.eu.org)
+
+* Fri Mar 17 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.3-1
+- enable docker excluder since the time it is installed (jchaloup@redhat.com)
+
+* Thu Mar 16 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.2-1
+- enable excluders during node/master scaling up (jchaloup@redhat.com)
+- Fixing variable naming for 35 scoping. (kwoodson@redhat.com)
+- Fix get_router_replicas infrastructure node count. (abutcher@redhat.com)
+- Fix containerized openvswitch race (sdodson@redhat.com)
+
+* Thu Mar 16 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.1-1
+- Bump version to 3.6.0 (smunilla@redhat.com)
+- Improve CONTRIBUTING guide with testing tricks (rhcarvalho@gmail.com)
+- Update versions in example inventories (sdodson@redhat.com)
+- Only call excluder playbooks on masters and nodes (sdodson@redhat.com)
+- Since we've decided that we're no longer paying attention to current status
+ remove this as it was toggling things (sdodson@redhat.com)
+- Remove travis notifications (jdetiber@redhat.com)
+- Removing dependency on master facts for master_public_url default
+ (ewolinet@redhat.com)
+- don't assume openshift_upgrade_target is in a form d.d (jchaloup@redhat.com)
+- Cherry picked from #3657 (ewolinet@redhat.com)
+- Revert "Enable docker during installation and upgrade by default"
+ (skuznets@redhat.com)
+- Nuage service account handling by single master
+ (vishal.patil@nuagenetworks.net)
+- Add router svcacct cluster-reader role (rteague@redhat.com)
+- Cherry picking from #3644 (ewolinet@redhat.com)
+- Revert module_utils six for openshift_health_checker (jdetiber@redhat.com)
+- Refactor and remove openshift_serviceaccount (rteague@redhat.com)
+- Fix typo (sdodson@redhat.com)
+- Force to use TLSv1.2 (related to https://github.com/openshift/openshift-
+ ansible/pull/2707) (olivier@openkumo.fr)
+- Raise on dry-run failures. (kwoodson@redhat.com)
+- validate excluders on non-atomic hosts only (jchaloup@redhat.com)
+- enable docker excluder since the time it is installed (jchaloup@redhat.com)
+- cherry picking from #3621 #3614 #3627 (ewolinet@redhat.com)
+- Renaming oadm_manage_node to oc_adm_manage_node (rteague@redhat.com)
+- add 'hawkular/metrics' when updating config (jcantril@redhat.com)
+- update all the masters (jcantril@redhat.com)
+- bug 1430661. Update masterConfig metricsPublicURL on install
+ (jcantril@redhat.com)
+- nuage: Move role back to config (smilner@redhat.com)
+- Fix incorrect comparison when detecting petsets (tbielawa@redhat.com)
+- Removed unused, unwanted, incorrectly committed code. (kwoodson@redhat.com)
+- Minor updates to README_CONTAINER_IMAGE.md (pep@redhat.com)
+- Fix references to openshift_set_node_ip in inventory examples
+ (gskgoskk@gmail.com)
+- Bug 1428711 - [IntService_public_324] ES pod is unable to read
+ searchguard.truststore after upgarde logging from 3.3.1 to 3.5.0
+ (rmeggins@redhat.com)
+- bug 1428249. Use ES hostmount storage if it exists (jcantril@redhat.com)
+- Use ansible.compat.six where possible (jdetiber@redhat.com)
+- Remove debug task (tbielawa@redhat.com)
+- Use six from ansible.module_utils for remote hosts (jdetiber@redhat.com)
+- re-enable excluders if they are enabled after openshift version detection
+ (jchaloup@redhat.com)
+- Allow overriding minTLSVersion and cipherSuites (meggen@redhat.com)
+- extend the excluders to containerized deployment (jchaloup@redhat.com)
+- Fixing the way policies are found. The old method was unreliable. This
+ method searches all and matches on properties. (kwoodson@redhat.com)
+- openshift_excluders depends on openshift_repos (sdodson@redhat.com)
+- add ability to specify an etcd version (mmckinst@umich.edu)
+- Lowering test coverage percentage. (kwoodson@redhat.com)
+- Removing ordereddict. Replaced with sorted keys. (kwoodson@redhat.com)
+- New role (tbielawa@redhat.com)
+- Fixed for linting. (kwoodson@redhat.com)
+- enable excluders by default (jchaloup@redhat.com)
+- ignore the docker excluder status if it is not enabled by a user
+ (jchaloup@redhat.com)
+- Fix pylint/pyflakes errors on master (sdodson@redhat.com)
+- Identify PetSets in 3.4 clusters and fail if any are detected
+ (tbielawa@redhat.com)
+- More logging fixes (ewolinet@redhat.com)
+- Fix for issue 3541 (srampal@cisco.com)
+- Fix to OpenshiftCLIConfig to support an ordereddict. This was breaking test
+ cases. (kwoodson@redhat.com)
+- - update excluders to latest, in non-upgrade scenarios do not update - check
+ both available excluder versions are at most of upgrade target version - get
+ excluder status through status command - make excluders enablement
+ configurable (jchaloup@redhat.com)
+- Adding scripts for building and pushing images (bleanhar@redhat.com)
+- Adding test_oc_adm_router. (kwoodson@redhat.com)
+- Loosely couple docker to iptables service (rteague@redhat.com)
+- Generic message directing people to contact support (sdodson@redhat.com)
+- Fixing plugin, nodeselectors, and secret pull check (ewolinet@redhat.com)
+- Adding into the origin inventory doc. (kwoodson@redhat.com)
+- Add oc_objectvalidator to upgrade check (sdodson@redhat.com)
+- Augmenting documentation for router sharding. (kwoodson@redhat.com)
+- Adding router test. (kwoodson@redhat.com)
+- openshift_facts: ensure system containers deps are installed
+ (gscrivan@redhat.com)
+- Preserve order of Docker registries (eric.mountain@amadeus.com)
+- Updating metrics defaults (ewolinet@redhat.com)
+- Enable coveralls.io (jdetiber@redhat.com)
+- Fix indentation of run_once (sdodson@redhat.com)
+- Update docs for test consolidation and remove the Makefile
+ (jdetiber@redhat.com)
+- Consolidate root/utils tests (jdetiber@redhat.com)
+- Remove dummy setup/teardown methods (rhcarvalho@gmail.com)
+- Clean up test files (rhcarvalho@gmail.com)
+- Remove commented-out test code (rhcarvalho@gmail.com)
+- Make generic OCObjectValidator from OCSDNValidator (mkhan@redhat.com)
+- logging needs openshift_master_facts before openshift_facts
+ (rmeggins@redhat.com)
+- separate out test tool configs from setup.cfg (jdetiber@redhat.com)
+- Dockerfile and docs to run containerized playbooks (pep@redhat.com)
+- Lower test coverage percentage. (kwoodson@redhat.com)
+- Mock runs differntly on travis. Fix the mock test params to be ANY.
+ (kwoodson@redhat.com)
+- Fixed the none namespace. Fixed tests with latest loc_oc_binary call.
+ (kwoodson@redhat.com)
+- Updating the namespace param to None. (kwoodson@redhat.com)
+- Regenerated code with latest yedit changes. (kwoodson@redhat.com)
+- Fixed tests to align with new naming. (kwoodson@redhat.com)
+- Fixed docs. Added check for delete failures. Updated namespace to None.
+ (kwoodson@redhat.com)
+- Fixing linters (kwoodson@redhat.com)
+- Adding integration test. Fixed issue with node_selector.
+ (kwoodson@redhat.com)
+- Adding oc_project to lib_openshift. (kwoodson@redhat.com)
+- Remove old commented-out tests (rhcarvalho@gmail.com)
+- Remove redundant assertion (rhcarvalho@gmail.com)
+- Fix test (rhcarvalho@gmail.com)
+- Lint utils/test (rhcarvalho@gmail.com)
+- Rewrap long lines (rhcarvalho@gmail.com)
+- Remove unused argument (rhcarvalho@gmail.com)
+- Remove unused Makefile variables (rhcarvalho@gmail.com)
+- Adding some more logging defaults (ewolinet@redhat.com)
+- node/sdn: make /var/lib/cni persistent to ensure IPAM allocations stick
+ around across node restart (dcbw@redhat.com)
+- BZ1422348 - Don't install python-ruamel-yaml (sdodson@redhat.com)
+- Re-generate modules (sdodson@redhat.com)
+- Only set ownership to etcd for thirdparty datadir (sdodson@redhat.com)
+- Added ports. (kwoodson@redhat.com)
+- Fixed router name to produce 2nd router. (kwoodson@redhat.com)
+- Updated to work with an array of routers. (kwoodson@redhat.com)
+- Adding support for router sharding. (kwoodson@redhat.com)
+- Removing the openshift_master_facts dependency (ewolinet@redhat.com)
+- bug 1420256. Initialize openshift_logging pvc_facts to empty
+ (jcantril@redhat.com)
+- Add oc_adm_policy_user task cluster-role policy (rteague@redhat.com)
+- Correct config for hosted registry (rteague@redhat.com)
+- Fixing checkout for bindings with -binding suffix (jupierce@redhat.com)
+- Leave an empty contiv role directory (sdodson@redhat.com)
+- Updating stdout check for changed_when (ewolinet@redhat.com)
+- test fixes for openshift_certificates_expiry (jdetiber@redhat.com)
+- oadm_policy_group/adm_policy_user module (jupierce@redhat.com)
+- Fail on Atomic if docker is too old (smilner@redhat.com)
+- Remove contiv role and playbook from rpm packages (sdodson@redhat.com)
+- Resolving yammlint errors (ewolinet@redhat.com)
+- Fixed error handling when oc adm ca create-server-cert fails. Fixed a logic
+ error in secure. (kwoodson@redhat.com)
+- removing extra when condition (kwoodson@redhat.com)
+- Removing run_once. (kwoodson@redhat.com)
+- Adding the activeDeadlineSeconds. Removed debug. (kwoodson@redhat.com)
+- Separating routes so logic is simpler. (kwoodson@redhat.com)
+- Defaulting variables properly to avoid undefined route in dict error.
+ (kwoodson@redhat.com)
+- Add v1.3 FIS templates (sdodson@redhat.com)
+- v1.4 Add FIS templates (sdodson@redhat.com)
+- Add FIS templates (sdodson@redhat.com)
+- Removed duplicate host param. (kwoodson@redhat.com)
+- Fixed failures on create when objects exist. (kwoodson@redhat.com)
+- Add ca-bundle.crt to list of certs to synchronize. (abutcher@redhat.com)
+- Do not force custom ca cert deployment. (abutcher@redhat.com)
+- regenerate lib_openshift with yedit exception changes (jdiaz@redhat.com)
+- Adding changed_whens for role, rolebinding, and scc reconciliation based on
+ output from oadm policy command (ewolinet@redhat.com)
+- raise exceptions when walking through object path (jdiaz@redhat.com)
+- logging fluentd filter was renamed to viaq (rmeggins@redhat.com)
+- Add 'persistentVolumeClaim' to volume_info type (rteague@redhat.com)
+- Updating delete/recreate with replace --force. (kwoodson@redhat.com)
+- Fixed logic error. Ensure both svc and dc exist. (kwoodson@redhat.com)
+- Modified base debug statements. Fixed oc_secret debug/verbose flag. Added
+ reencrypt for route. (kwoodson@redhat.com)
+- Adding support for a route with certs and reencrypt. (kwoodson@redhat.com)
+- node: use the new oc_atomic_container module (gscrivan@redhat.com)
+- master: use the new oc_atomic_container module (gscrivan@redhat.com)
+- etcd: use the new oc_atomic_container module (gscrivan@redhat.com)
+- lib_openshift: new module atomic_container (gscrivan@redhat.com)
+- Combined (squashed) commit for all changes related to adding Contiv support
+ into Openshift Ansible. This is the first (beta) release of Contiv with
+ Openshift and is only supported for Openshift Origin + Bare metal deployments
+ at the time of this commit. Please refer to the Openshift and Contiv official
+ documentation for details of the level of support for different features and
+ modes of operation. (srampal@cisco.com)
+- Re-generate lib_openshift (sdodson@redhat.com)
+- Make s3_volume_mount available to set_fact call (smilner@redhat.com)
+- Correct fact creation for pvc (rteague@redhat.com)
+- [oc_obj] Move namespace argument to end of command. (abutcher@redhat.com)
+- Create hosted registry service (rteague@redhat.com)
+- Correct typo in haproxy router collection. (abutcher@redhat.com)
+- Fix issue #3505, add notes about origin upgrade versions support in BYO
+ upgrade README file (contact@stephane-klein.info)
+- Moving replica logic to filter_plugin to fix skipped task variable behavior.
+ (kwoodson@redhat.com)
+- install the latest excluders (jchaloup@redhat.com)
+- openshift_hosted: Update tasks to use oc_ modules (rteague@redhat.com)
+- Rebased. (kwoodson@redhat.com)
+- Fixed indentation (kwoodson@redhat.com)
+- Adding get_env_var to deploymentconfig. (kwoodson@redhat.com)
+- Fixed default variables. Added a fix to generated secret in env var.
+ (kwoodson@redhat.com)
+- Revert "Add centos paas sig common" (sdodson@redhat.com)
+- Fix Quick Installer failed due to a Python method failure
+ (tbielawa@redhat.com)
+- Removed JGroups cert and password generation. (juraci@kroehling.de)
+- Fix symlink to lookup_plugins/oo_option.py (jchaloup@redhat.com)
+- Use 2 and 3 friendly urlparse in oo_filters (smilner@redhat.com)
+- Update v1.5 content (sdodson@redhat.com)
+- Update v1.4 content (sdodson@redhat.com)
+- xPaaS ose-v1.3.6 (sdodson@redhat.com)
+- Prepare for origin moving to OCP version scheme (ccoleman@redhat.com)
+- initialize_openshift_version: handle excluder packages (gscrivan@redhat.com)
+- Add insecure edge termination policy for kibana. (whearn@redhat.com)
+- openshift_logging default to 2 replicas of primary shards
+ (jcantril@redhat.com)
+- Fixing doc for oc_adm_ca_server_cert. (kwoodson@redhat.com)
+- Convert selectattr tests to use 'match' (rteague@redhat.com)
+- Re-generate lib_openshift and lib_utils libraries (sdodson@redhat.com)
+- curator config must be in /etc/curator not /usr/curator (rmeggins@redhat.com)
+- Updated for pylint. Fixed create doc. (kwoodson@redhat.com)
+- Attempt to handle router preparation errors. (kwoodson@redhat.com)
+- Fixing the generate tox tests. (kwoodson@redhat.com)
+- BZ1414276 - Quote ansible_ssh_user when determining group id
+ (sdodson@redhat.com)
+- Moving import to local class. (kwoodson@redhat.com)
+- Added required_together. Added two minor bug fixes for when data is not
+ passed. (kwoodson@redhat.com)
+- fix up ruamel.yaml/pyyaml no-member lint errors (jdetiber@redhat.com)
+- Renamed NotContainerized to NotContainerizedMixin and dropped no-member
+ (smilner@redhat.com)
+- Removed unrequired no-members from yedit and generated code
+ (smilner@redhat.com)
+- Removing reference to oadm. Moved parameter under general params.
+ (kwoodson@redhat.com)
+- adding tag to update_master_config (ewolinet@redhat.com)
+- CloudFront oc_secret contents should be a list (smilner@redhat.com)
+- lib_openshift oc file lookup improvements (jdetiber@redhat.com)
+- roles/lib_openshift: Handle /usr/local/bin/oc with sudo (walters@verbum.org)
+- if no key, cert, cacert, or default_cert is passed then do not pass to oc
+ (kwoodson@redhat.com)
+- Added backup feature. Fixed a bug with reading the certificate and verifying
+ names. Added force option. (kwoodson@redhat.com)
+- Add SDNValidator Module (mkhan@redhat.com)
+- bug 1425321. Default the master api port based on the facts
+ (jcantril@redhat.com)
+- Bug 1420219 - No log entry can be found in Kibana UI after deploying logging
+ stacks with ansible (rmeggins@redhat.com)
+- Address cert expiry parsing review comments (tbielawa@redhat.com)
+- Fix typo (rhcarvalho@gmail.com)
+- Update link to project homepage (rhcarvalho@gmail.com)
+- Implement fake openssl cert classes (tbielawa@redhat.com)
+- Removed oadm_ references in doc. (kwoodson@redhat.com)
+- Remove unused plays (jhadvig@redhat.com)
+- Remove pytest-related dependencies from setup.py (rhcarvalho@gmail.com)
+- Added copy support when modifying cert and key on existence
+ (kwoodson@redhat.com)
+- Small spacing fix. (kwoodson@redhat.com)
+- Updated doc and defined defaults for signer_* (kwoodson@redhat.com)
+- Removed unused code. Made tests executable. (kwoodson@redhat.com)
+- Removing cmd, fixed docs and comments. (kwoodson@redhat.com)
+- Rename of oadm_ca to oc_adm_ca. Decided to whittle down to the direct call,
+ server_cert. (kwoodson@redhat.com)
+- Fixing doc. (kwoodson@redhat.com)
+- Adding oadm_ca to lib_openshift. (kwoodson@redhat.com)
+- Fixing docs. Fixed default_cert suggestion. (kwoodson@redhat.com)
+- Renamed modules, fixed docs, renamed variables, and cleaned up logic.
+ (kwoodson@redhat.com)
+- Renaming registry and router roles to oc_adm_ (kwoodson@redhat.com)
+- Fixing registry doc and suggestions. (kwoodson@redhat.com)
+- Adding router and registry to lib_openshift. (kwoodson@redhat.com)
+- bug 142026. Ensure Ops PVC prefix are initialized to empty when ops e…
+ nabled (jcantril@redhat.com)
+- Reverting logic for verify api handler to be uniform with other ways we
+ verify, will be uniformly updated in future (ewolinet@redhat.com)
+- bug 1417261. Quote name and secrets in logging templates
+ (jcantril@redhat.com)
+- openshift_facts: handle 'latest' version (gscrivan@redhat.com)
+- Surrounding node selector values with quotes (ewolinet@redhat.com)
+- Raise the bar on coverage requirements (rhcarvalho@gmail.com)
+- Accept extra positional arguments in tox (rhcarvalho@gmail.com)
+- Replace nose with pytest (utils) (rhcarvalho@gmail.com)
+- Clean up utils/README.md (rhcarvalho@gmail.com)
+- Replace nose with pytest (rhcarvalho@gmail.com)
+- Extract assertion common to all tests as function (rhcarvalho@gmail.com)
+- Replace nose yield-style tests w/ pytest fixtures (rhcarvalho@gmail.com)
+- Configure pytest to run tests and coverage (rhcarvalho@gmail.com)
+- Fix validation of generated code (rhcarvalho@gmail.com)
+- Make tests run with either nosetests or pytest (rhcarvalho@gmail.com)
+- Replace assert_equal with plain assert (rhcarvalho@gmail.com)
+- Make usage of short_version/release consistent (rhcarvalho@gmail.com)
+- Reorganize tests and helper functions logically (rhcarvalho@gmail.com)
+- Remove test duplication (rhcarvalho@gmail.com)
+- Move similar test cases together (rhcarvalho@gmail.com)
+- Insert paths in the second position of sys.path (rhcarvalho@gmail.com)
+- Rename test for consistency (rhcarvalho@gmail.com)
+- Replace has_key in new modules (smilner@redhat.com)
+- Fix symlink to filter_plugins/oo_filters.py (jchaloup@redhat.com)
+- Correct logic test for running pods (rteague@redhat.com)
+- Temporarily lower the bar for minimum coverage (rhcarvalho@gmail.com)
+- Unset exec bit in tests, add missing requirements (jdetiber@redhat.com)
+- Include missing unit tests to test runner config (rhcarvalho@gmail.com)
+- Fix tests on Python 3 (rhcarvalho@gmail.com)
+- Remove dead code in installer (rhcarvalho@gmail.com)
+- Remove dead code (rhcarvalho@gmail.com)
+- Document how to find dead Python code (rhcarvalho@gmail.com)
+- updating until statments on uri module for api verification
+ (ewolinet@redhat.com)
+- add dependency on openshift_repos (sdodson@redhat.com)
+- Fixing a bug by removing default debug (kwoodson@redhat.com)
+- Updating to use uri module instead (ewolinet@redhat.com)
+- Updating node playbooks to use oc_obj (rteague@redhat.com)
+- Add centos paas sig common (sdodson@redhat.com)
+- Disentangle openshift_repos from openshift_facts (sdodson@redhat.com)
+- Adding missing handler to resolve error that it was not found
+ (ewolinet@redhat.com)
+- String compatibility for python2,3 (kwoodson@redhat.com)
+- Fix indenting/ordering in router cert redeploy (sdodson@redhat.com)
+- post_control_plane.yml: don't fail on grep (gscrivan@redhat.com)
+- facts/main: Require Python 3 for Fedora, Python 2 everywhere else
+ (walters@verbum.org)
+- Fix typo, add symlinks for roles (sdodson@redhat.com)
+- Resolve deprecation warning (rteague@redhat.com)
+- Revert temporary hack to skip router/registry upgrade. (dgoodwin@redhat.com)
+- Don't attempt to install python-ruamel-yaml on atomic (sdodson@redhat.com)
+- Pleasing the linting gods. (kwoodson@redhat.com)
+- Fixed tests for pyyaml vs ruamel. Added import logic. Fixed safe load.
+ (kwoodson@redhat.com)
+- update example templates+imagestreams (bparees@redhat.com)
+- Adding fallback support for pyyaml. (kwoodson@redhat.com)
+- bug 1420217. Default ES memory to be compariable to 3.4 deployer
+ (jcantril@redhat.com)
+- Register cloudfront privkey when required (smilner@redhat.com)
+- initialize oo_nodes_to_upgrade group when running control plane upgrade only
+ (jchaloup@redhat.com)
+- adding some quotes for safety (ewolinet@redhat.com)
+- Revert "Add block+when skip to `openshift_facts` tasks" (abutcher@redhat.com)
+- Add missing full hostname for the Hawkular Metrics certificate (BZ1421060)
+ Fix issue where the signer certificate's name is static, preventing
+ redeployments from being acceptable. (mwringe@redhat.com)
+- fixing use of oc_scale module (ewolinet@redhat.com)
+- fixing default for logging (ewolinet@redhat.com)
+- Fix some lint (jdetiber@redhat.com)
+- Fixed issue where upgrade fails when using daemon sets (e.g. aggregated
+ logging) (adbaldi+ghub@gmail.com)
+- upgrades: fix path to disable_excluder.yml (jchaloup@redhat.com)
+- Add upgrade job step after the entire upgrade performs (maszulik@redhat.com)
+- Ansible Lint cleanup and making filter/lookup plugins used by
+ openshift_master_facts available within the role (jdetiber@redhat.com)
+- Update variant_version (smilner@redhat.com)
+- Add block+when skip to `openshift_facts` tasks (tbielawa@redhat.com)
+- Trying to fix up/audit note some changes (tbielawa@redhat.com)
+- updating defaults for logging and metrics roles (ewolinet@redhat.com)
+- Fix logic for checking docker-registry (rteague@redhat.com)
+- node, vars/main.yml: define l_is_ha and l_is_same_version
+ (gscrivan@redhat.com)
+- Modify playbooks to use oc_obj module (rteague@redhat.com)
+- master, vars/main.yml: define l_is_ha and l_is_same_version
+ (gscrivan@redhat.com)
+- oc route commands now using the oc_route module (smilner@redhat.com)
+- Modify playbooks to use oc_label module (rteague@redhat.com)
+- Fix cases where child classes override OpenShiftCLI values
+ (jdetiber@redhat.com)
+- BZ1421860: increase Heapster's metric resolution to 30s (mwringe@redhat.com)
+- BZ1421834: increase the Heapster metric resolution to 30s
+ (mwringe@redhat.com)
+- Fix Bug 1419654 Remove legacy config_base fallback to /etc/openshift
+ (sdodson@redhat.com)
+- Modify playbooks to use oadm_manage_node module (rteague@redhat.com)
+- Removing trailing spaces (esauer@redhat.com)
+- Removed adhoc s3_registry (smilner@redhat.com)
+- replace 'oc service' command with its lib_openshift equivalent
+ (jchaloup@redhat.com)
+- Making router pods scale with infra nodes (esauer@redhat.com)
+- Provisioning of nfs share and PV for logging ops (efreiber@redhat.com)
+- Add libselinux-python dependency for localhost (sdodson@redhat.com)
+- oc secrets now done via oc_secret module (smilner@redhat.com)
+- More fixes for reboot/wait for hosts. (dgoodwin@redhat.com)
+- fix openshift_logging where defaults filter needs quoting
+ (jcantril@redhat.com)
+- Do not hard code package names (rhcarvalho@gmail.com)
+- Refactor code to access values from task_vars (rhcarvalho@gmail.com)
+- oc serviceaccount now done via oc_serviceaccount module (smilner@redhat.com)
+- bug 1420229. Bounce metrics components to recognize changes on updates or
+ upgrades (jcantril@redhat.com)
+- node: simplify when conditionals (gscrivan@redhat.com)
+- openvswitch: simplify when conditionals (gscrivan@redhat.com)
+- uninstall: delete master-api and master-controllers (gscrivan@redhat.com)
+- master: support HA deployments with system containers (gscrivan@redhat.com)
+- Ensure etcd client certs are regenerated with embedded etcd.
+ (abutcher@redhat.com)
+- bug 1420425. Allow setting of public facing certs for kibana in
+ openshift_logging role (jcantril@redhat.com)
+- bug 1399523. Ops pvc should have different prefix from non-ops for
+ openshift_logging (jcantril@redhat.com)
+- Include rpm/git paths in expiry README. (tbielawa@redhat.com)
+- Fixing docs, linting, and comments. (kwoodson@redhat.com)
+- fix bug 1420204. Default openshift_logging_use_journal to empty so fluentd
+ detects and is consistent with deployer (jcantril@redhat.com)
+- Let pylint use as many CPUs as available (rhcarvalho@gmail.com)
+- Add note about extraneous virtualenvs (rhcarvalho@gmail.com)
+- Document how to create new checks (rhcarvalho@gmail.com)
+- Introduce tag notation for checks (rhcarvalho@gmail.com)
+- Replace multi-role checks with action plugin (rhcarvalho@gmail.com)
+- Removing the /usr/bin/ansible-playbook dependency in in the spec file
+ (mwoodson@redhat.com)
+- use the correct name for the ruamel-yaml python module (jchaloup@redhat.com)
+- Reword module documentation (rhcarvalho@gmail.com)
+- Separate import groups with a blank line (rhcarvalho@gmail.com)
+- Remove commented-out debugging code (rhcarvalho@gmail.com)
+- Replace service account secrets handling with oc_serviceaccount_secret module
+ (jchaloup@redhat.com)
+- node: refactor Docker container tasks in a block (gscrivan@redhat.com)
+- etcd: use as system container (gscrivan@redhat.com)
+- Implement uninstall for system containers (gscrivan@redhat.com)
+- system-containers: implement idempotent update (gscrivan@redhat.com)
+- atomic-openshift: install as a system container (gscrivan@redhat.com)
+- make sure cluster_size is an int for arith. ops (rmeggins@redhat.com)
+- Bug 1420234 - illegal_argument_exception in Kibana UI. (rmeggins@redhat.com)
+- bug 1420538. Allow users to set supplementalGroup for Cassandra
+ (jcantril@redhat.com)
+- Document openshift_cockpit_deployer_prefix and add
+ openshift_cockpit_deployer_version (sdodson@redhat.com)
+- Make the cert expiry playbooks runnable (tbielawa@redhat.com)
+- Ensure embedded etcd config uses CA bundle. (abutcher@redhat.com)
+- bug 1420684. On logging upgrade use the correct value for namespace
+ (jcantril@redhat.com)
+- Fixing docs. (kwoodson@redhat.com)
+- bug 1419962. fix openshift_metrics pwd issue after reinstall where cassandra
+ has incorrect pwd exception (jcantril@redhat.com)
+- Fixing for linters. (kwoodson@redhat.com)
+- Adding test cases. (kwoodson@redhat.com)
+- Fixing docs. (kwoodson@redhat.com)
+- oc process (ihorvath@redhat.com)
+- node: ensure conntrack-tools is installed (gscrivan@redhat.com)
+- Updating defaults to pull from previously defined variable names used in
+ playbooks (ewolinet@redhat.com)
+- Pleasing the linting bot. (kwoodson@redhat.com)
+- fixup! master: latest use same predicates as last version
+ (gscrivan@redhat.com)
+- fixup! master: latest use same priorities as last version
+ (gscrivan@redhat.com)
+- Adding integration tests. (kwoodson@redhat.com)
+- Set image change triggers to auto=true for OCP 3.4 - for v1.5
+ (simaishi@redhat.com)
+- Reference class instead of self.__class__ within super constructor to avoid
+ calling self forever. (abutcher@redhat.com)
+- Adding oc_env to lib_openshift. (kwoodson@redhat.com)
+- Fixing for flake8 spacing. (kwoodson@redhat.com)
+- Fixing tests for linters. (kwoodson@redhat.com)
+- Adding port support for route. (kwoodson@redhat.com)
+- use pvc_size instead of pv_size for openshift_metrics since the role creates
+ claims (jcantril@redhat.com)
+- Added temporary kubeconfig file. Fixed tests to coincide with tmpfile.
+ (kwoodson@redhat.com)
+- Set image change triggers to auto=true for OCP 3.4
+ (https://github.com/ManageIQ/manageiq-pods/pull/88) (simaishi@redhat.com)
+- fixes 1419839. Install only heapster for openshift_metrics when heapster
+ standalone flag is set (jcantril@redhat.com)
+- Adding code to copy kubeconfig before running oc commands.
+ (kwoodson@redhat.com)
+- master: latest use same predicates as last version (gscrivan@redhat.com)
+- master: latest use same priorities as last version (gscrivan@redhat.com)
+- Changed lib_openshift to use real temporary files. (twiest@redhat.com)
+- Fixed ansible module unit and integration tests and added runners.
+ (twiest@redhat.com)
+- Moving to ansible variable. (kwoodson@redhat.com)
+- Specifying port for wait_for call. (kwoodson@redhat.com)
+- Reverting commit 3257 and renaming master_url to openshift_logging_master_url
+ (ewolinet@redhat.com)
+- [openshift_ca] Reference client binary from openshift_ca_host.
+ (abutcher@redhat.com)
+- Fix playbooks/byo/openshift_facts.yml include path (sdodson@redhat.com)
+- Add missing symlink to roles (rhcarvalho@gmail.com)
+- Bump registry-console to 3.5 (sdodson@redhat.com)
+- Added oc_serviceaccount_secret to lib_openshift. (twiest@redhat.com)
+- fix 1406057. Allow openshift_metrics nodeselectors for components
+ (jcantril@redhat.com)
+- Use service annotations to redeploy router service serving cert signer cert.
+ (abutcher@redhat.com)
+- Move excluder disablement into control plane and node upgrade playbooks
+ (sdodson@redhat.com)
+- Add excluder management to upgrade and config playbooks (sdodson@redhat.com)
+- Add openshift_excluder role (sdodson@redhat.com)
+- Fix RHEL Subscribe std_include path (tbielawa@redhat.com)
+- Copies CloudFront pem file to registry hosts (smilner@redhat.com)
+- Remove legacy router/registry certs and client configs from synchronized
+ master certs. (abutcher@redhat.com)
+- Bump registry to 3.4 (sdodson@redhat.com)
+- Sync latest image stream content (sdodson@redhat.com)
+- Support latest for containerized version (gscrivan@redhat.com)
+- Ensure python2-ruamel-yaml is installed (sdodson@redhat.com)
+- openshift_logging link pull secret to serviceaccounts fix unlabel when
+ undeploying (jcantril@redhat.com)
+- fixes 1414625. Fix check of keytool in openshift_metrics role
+ (jcantril@redhat.com)
+- Doc enhancements. (kwoodson@redhat.com)
+- fixes 1417261. Points playbooks to the correct 3.5 roles for logging and
+ metrics (jcantril@redhat.com)
+- Change default docker log driver from json-file to journald.
+ (abutcher@redhat.com)
+- Add logic to verify patched version of Ansible (rteague@redhat.com)
+- Restructure certificate redeploy playbooks (abutcher@redhat.com)
+- Temporary hack to skip router/registry upgrade. (dgoodwin@redhat.com)
+- Fixing linters. (kwoodson@redhat.com)
+- run node upgrade if master is node as part of the control plan upgrade only
+ (jchaloup@redhat.com)
+- Appease yamllint (sdodson@redhat.com)
+- Adding include_role to block to resolve when eval (ewolinet@redhat.com)
+- Updating oc_apply to use command instead of shell (ewolinet@redhat.com)
+- Wrap openshift_hosted_logging include_role within a block.
+ (abutcher@redhat.com)
+- Adding unit test. Fixed redudant calls to get. (kwoodson@redhat.com)
+- Fixing doc and generating new label with updated base. (kwoodson@redhat.com)
+- oc_label ansible module (jdiaz@redhat.com)
+- Fixing copy pasta comments. Fixed required in docs. (kwoodson@redhat.com)
+- Fix openshift_hosted_logging bool typo. (abutcher@redhat.com)
+- Updating oc_apply changed_when conditions, fixing filter usage for
+ openshift_hosted_logging playbook (ewolinet@redhat.com)
+- Add default ansible.cfg file (rteague@redhat.com)
+- Move current node upgrade tasks under openshift_node_upgrade role
+ (jchaloup@redhat.com)
+- Fix host when waiting for a master system restart. (dgoodwin@redhat.com)
+- Adding bool filter to when openshift_logging_use_ops evals and updating
+ oc_apply to handle trying to update immutable fields (ewolinet@redhat.com)
+- Fixing for tox tests. (flake8|pylint) (kwoodson@redhat.com)
+- Adding unit test for oc_service. Added environment fix for non-standard oc
+ installs. (kwoodson@redhat.com)
+- Adding integration tests. (kwoodson@redhat.com)
+- Adding oc_service to lib_openshift. (kwoodson@redhat.com)
+- Sync etcd ca certs from etcd_ca_host to other etcd hosts
+ (jawed.khelil@amadeus.com)
+
* Tue Jan 31 2017 Scott Dodson <sdodson@redhat.com> 3.5.3-1
- Adding bool filter to ensure that we correctly set ops host for fluentd
(ewolinet@redhat.com)
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index d60b68885..8d64b0521 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -33,5 +33,6 @@
openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
+ openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
openshift_use_dnsmasq: false
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 86eff4ca4..4db0720d0 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -7,5 +7,4 @@
vars:
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_debug_level: "{{ debug_level | default(2) }}"
- openshift_deployment_type: "{{ deployment_type }}"
openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}"
diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml
index eebfcd20d..f8eebe898 100644
--- a/playbooks/byo/openshift-cluster/openshift-logging.yml
+++ b/playbooks/byo/openshift-cluster/openshift-logging.yml
@@ -32,4 +32,3 @@
vars:
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_debug_level: "{{ debug_level | default(2) }}"
- openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 4ee6afe2a..304559f6e 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -28,7 +28,7 @@
tasks:
- name: Mark node unschedulable
- oadm_manage_node:
+ oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
schedulable: False
delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -51,7 +51,7 @@
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
- oadm_manage_node:
+ oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
schedulable: True
delegate_to: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
index d5fd7c424..5feb33be4 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
@@ -30,7 +30,6 @@
g_new_master_hosts: []
g_new_node_hosts: []
openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_deployment_type: "{{ deployment_type }}"
- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
index e4db65b02..86f5a36ca 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -50,6 +50,8 @@
tags:
- pre_upgrade
+# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play.
+# So it is necassary to run the play after running disable_excluder.yml.
- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md
new file mode 100644
index 000000000..930cc753c
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md
@@ -0,0 +1,18 @@
+# v3.5 Major and Minor Upgrade Playbook
+
+## Overview
+This playbook currently performs the
+following steps.
+
+ * Upgrade and restart master services
+ * Unschedule node.
+ * Upgrade and restart docker
+ * Upgrade and restart node services
+ * Modifies the subset of the configuration necessary
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/roles b/playbooks/byo/openshift-cluster/upgrades/v3_6/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
new file mode 100644
index 000000000..900bbc8d8
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -0,0 +1,111 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: l_oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
+ tags:
+ - pre_upgrade
+
+# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play.
+# So it is necassary to run the play after running disable_excluder.yml.
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/v3_6/validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
new file mode 100644
index 000000000..5bd0f7ac5
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -0,0 +1,115 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+ tags:
+ - pre_upgrade
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/v3_6/validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
new file mode 100644
index 000000000..96d89dbdd
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -0,0 +1,104 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+ tags:
+ - pre_upgrade
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-etcd/restart.yml b/playbooks/byo/openshift-etcd/restart.yml
index 6713f07e3..19403116d 100644
--- a/playbooks/byo/openshift-etcd/restart.yml
+++ b/playbooks/byo/openshift-etcd/restart.yml
@@ -4,5 +4,3 @@
- always
- include: ../../common/openshift-etcd/restart.yml
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml
index 2d20f69f4..21e4cff1b 100644
--- a/playbooks/byo/openshift-master/restart.yml
+++ b/playbooks/byo/openshift-master/restart.yml
@@ -4,5 +4,3 @@
- always
- include: ../../common/openshift-master/restart.yml
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml
index 7075bb59e..a5705e990 100644
--- a/playbooks/byo/openshift-master/scaleup.yml
+++ b/playbooks/byo/openshift-master/scaleup.yml
@@ -27,4 +27,3 @@
vars:
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_debug_level: "{{ debug_level | default(2) }}"
- openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-node/restart.yml b/playbooks/byo/openshift-node/restart.yml
index 3985a83bb..6861625b9 100644
--- a/playbooks/byo/openshift-node/restart.yml
+++ b/playbooks/byo/openshift-node/restart.yml
@@ -4,5 +4,3 @@
- always
- include: ../../common/openshift-node/restart.yml
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml
index 2b10b6c76..88d236b53 100644
--- a/playbooks/byo/openshift-node/scaleup.yml
+++ b/playbooks/byo/openshift-node/scaleup.yml
@@ -27,6 +27,5 @@
vars:
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_debug_level: "{{ debug_level | default(2) }}"
- openshift_deployment_type: "{{ deployment_type }}"
openshift_master_etcd_hosts: "{{ groups.etcd | default([]) }}"
openshift_master_etcd_port: 2379
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index 65c0b1c01..8c6d77024 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -5,8 +5,6 @@
- name: Subscribe hosts, update repos and update OS packages
hosts: l_oo_all_hosts
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
roles:
- role: rhel_subscribe
when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
diff --git a/playbooks/certificate_expiry/easy-mode-upload.yaml b/playbooks/certificate_expiry/easy-mode-upload.yaml
new file mode 100644
index 000000000..378d1f154
--- /dev/null
+++ b/playbooks/certificate_expiry/easy-mode-upload.yaml
@@ -0,0 +1,40 @@
+# This example generates HTML and JSON reports and
+#
+# Copies of the generated HTML and JSON reports are uploaded to the masters,
+# which is particularly useful when this playbook is run from a container.
+#
+# All certificates (healthy or not) are included in the results
+#
+# Optional environment variables to alter the behaviour of the playbook:
+# CERT_EXPIRY_WARN_DAYS: Length of the warning window in days (45)
+# COPY_TO_PATH: path to copy reports to in the masters (/etc/origin/certificate_expiration_report)
+---
+- name: Generate certificate expiration reports
+ hosts: nodes:masters:etcd
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_save_json_results: yes
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_show_all: yes
+ openshift_certificate_expiry_warning_days: "{{ lookup('env', 'CERT_EXPIRY_WARN_DAYS') | default('45', true) }}"
+ roles:
+ - role: openshift_certificate_expiry
+
+- name: Upload reports to master
+ hosts: masters
+ gather_facts: no
+ vars:
+ destination_path: "{{ lookup('env', 'COPY_TO_PATH') | default('/etc/origin/certificate_expiration_report', true) }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}"
+ tasks:
+ - name: Ensure that the target directory exists
+ file:
+ path: "{{ destination_path }}"
+ state: directory
+ - name: Copy the reports
+ copy:
+ dest: "{{ destination_path }}/{{ timestamp }}-{{ item }}"
+ src: "/tmp/{{ item }}"
+ with_items:
+ - "cert-expiry-report.html"
+ - "cert-expiry-report.json"
diff --git a/playbooks/certificate_expiry/html_and_json_timestamp.yaml b/playbooks/certificate_expiry/html_and_json_timestamp.yaml
new file mode 100644
index 000000000..2189455b7
--- /dev/null
+++ b/playbooks/certificate_expiry/html_and_json_timestamp.yaml
@@ -0,0 +1,16 @@
+---
+# Generate timestamped HTML and JSON reports in /var/lib/certcheck
+
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_save_json_results: yes
+ openshift_certificate_expiry_show_all: yes
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}"
+ openshift_certificate_expiry_html_report_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.html"
+ openshift_certificate_expiry_json_results_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.json"
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 82f711f40..1b967b7f1 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -27,9 +27,6 @@
when: openshift_docker_selinux_enabled is not defined
- include: disable_excluder.yml
- vars:
- # the excluders needs to be disabled no matter what status says
- with_status_check: false
tags:
- always
@@ -60,3 +57,7 @@
- include: openshift_hosted.yml
tags:
- hosted
+
+- include: reset_excluder.yml
+ tags:
+ - always
diff --git a/playbooks/common/openshift-cluster/disable_excluder.yml b/playbooks/common/openshift-cluster/disable_excluder.yml
index b2e025cb8..f664c51c9 100644
--- a/playbooks/common/openshift-cluster/disable_excluder.yml
+++ b/playbooks/common/openshift-cluster/disable_excluder.yml
@@ -1,6 +1,6 @@
---
-- name: Record excluder state and disable
- hosts: l_oo_all_hosts
+- name: Disable excluders
+ hosts: oo_masters_to_config:oo_nodes_to_config
gather_facts: no
tasks:
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
index ca5177852..5425f448f 100644
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -56,8 +56,6 @@
- role: node
local_facts:
dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
roles:
- openshift_node_dnsmasq
post_tasks:
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 18f99728c..9cebecd68 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -15,5 +15,3 @@
hostname: "{{ openshift_hostname | default(None) }}"
- set_fact:
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- - set_fact:
- openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 7f37c606f..07b38920f 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -18,13 +18,6 @@
msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils.
when: "not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout"
-- include: disable_excluder.yml
- vars:
- # the excluders needs to be disabled no matter what status says
- with_status_check: false
- tags:
- - always
-
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
roles:
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 06cda36a5..5db71b857 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -53,6 +53,8 @@
pre_tasks:
- set_fact:
openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+ - set_fact:
+ openshift_metrics_hawkular_hostname: "{{ g_metrics_hostname | default('hawkular-metrics.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
tasks:
- block:
@@ -60,3 +62,9 @@
name: openshift_logging
tasks_from: update_master_config
when: openshift_hosted_logging_deploy | default(false) | bool
+
+ - block:
+ - include_role:
+ name: openshift_metrics
+ tasks_from: update_master_config
+ when: openshift_hosted_metrics_deploy | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
index 2af699209..cbb4a2434 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
@@ -31,7 +31,7 @@
- name: Generate new etcd CA
hosts: oo_first_etcd
roles:
- - role: etcd_ca
+ - role: openshift_etcd_ca
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
index 6771cc98d..8c8062585 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
@@ -48,10 +48,6 @@
# Replace dc/docker-registry certificate secret contents if set.
- block:
- - name: Load lib_openshift modules
- include_role:
- name: lib_openshift
-
- name: Retrieve registry service IP
oc_service:
namespace: default
@@ -70,9 +66,12 @@
--signer-cert={{ openshift.common.config_base }}/master/ca.crt
--signer-key={{ openshift.common.config_base }}/master/ca.key
--signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt
- --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
+ --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
--cert={{ openshift.common.config_base }}/master/registry.crt
--key={{ openshift.common.config_base }}/master/registry.key
+ {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
+ --expire-days={{ openshift_hosted_registry_cert_expire_days | default(730) }}
+ {% endif %}
- name: Update registry certificates secret
oc_secret:
diff --git a/playbooks/common/openshift-cluster/reset_excluder.yml b/playbooks/common/openshift-cluster/reset_excluder.yml
index 7c544ee32..eaa8ce39c 100644
--- a/playbooks/common/openshift-cluster/reset_excluder.yml
+++ b/playbooks/common/openshift-cluster/reset_excluder.yml
@@ -1,6 +1,6 @@
---
- name: Re-enable excluder if it was previously enabled
- hosts: l_oo_all_hosts
+ hosts: oo_masters_to_config:oo_nodes_to_config
gather_facts: no
tasks:
- include_role:
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index 078991b12..74cc1d527 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -22,8 +22,6 @@
- always
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - set_fact:
- openshift_deployment_type: "{{ deployment_type }}"
- include: evaluate_groups.yml
tags:
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
index b83e4d821..be956fca5 100644
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
@@ -3,8 +3,6 @@
- name: Subscribe hosts, update repos and update OS packages
hosts: oo_hosts_to_update
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
roles:
# Explicitly calling openshift_facts because it appears that when
# rhel_subscribe is skipped that the openshift_facts dependency for
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
index 2a85dc92e..d1e431c5e 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
@@ -1,6 +1,6 @@
---
- name: Record excluder state and disable
- hosts: l_oo_all_hosts
+ hosts: oo_masters_to_config:oo_nodes_to_config
gather_facts: no
tasks:
- include: pre/validate_excluder.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index a3b8c489e..01d151eb9 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -29,7 +29,6 @@
g_new_master_hosts: []
g_new_node_hosts: []
openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_deployment_type: "{{ deployment_type }}"
- name: Set oo_options
hosts: oo_all_hosts
@@ -71,8 +70,8 @@
tasks:
- name: Check if iptables is running
command: systemctl status iptables
- ignore_errors: true
changed_when: false
+ failed_when: false
register: service_iptables_status
- name: Set fact os_firewall_use_firewalld FALSE for iptables
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 6f096f705..c00795a8d 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -5,7 +5,6 @@
- name: Upgrade default router and default registry
hosts: oo_first_master
vars:
- openshift_deployment_type: "{{ deployment_type }}"
registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}"
router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}"
oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml
index 5078638b7..6de1ed061 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml
@@ -3,20 +3,27 @@
# - repoquery_cmd
# - excluder
# - openshift_upgrade_target
-- name: Get available excluder version
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "{{ excluder }}"
- register: excluder_version
- failed_when: false
- changed_when: false
+- block:
+ - name: Get available excluder version
+ command: >
+ {{ repoquery_cmd }} --qf '%{version}' "{{ excluder }}"
+ register: excluder_version
+ failed_when: false
+ changed_when: false
-- name: Docker excluder version detected
- debug:
- msg: "{{ excluder }}: {{ excluder_version.stdout }}"
+ - name: Docker excluder version detected
+ debug:
+ msg: "{{ excluder }}: {{ excluder_version.stdout }}"
-- name: Check the available {{ excluder }} version is at most of the upgrade target version
- fail:
- msg: "Available {{ excluder }} version {{ excluder_version.stdout }} is higher than the upgrade target version {{ openshift_upgrade_target }}"
- when:
+ - name: Printing upgrade target version
+ debug:
+ msg: "{{ openshift_upgrade_target }}"
+
+ - name: Check the available {{ excluder }} version is at most of the upgrade target version
+ fail:
+ msg: "Available {{ excluder }} version {{ excluder_version.stdout }} is higher than the upgrade target version"
+ when:
- "{{ excluder_version.stdout != '' }}"
- - "{{ excluder_version.stdout.split('.')[0:2] | join('.') | version_compare(openshift_upgrade_target, '>', strict=True) }}"
+ - "{{ excluder_version.stdout.split('.')[0:2] | join('.') | version_compare(openshift_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True) }}"
+ when:
+ - not openshift.common.is_atomic | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
index df2b664d4..03ac02e9f 100644
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -1,7 +1,26 @@
---
# We verified latest rpm available is suitable, so just yum update.
-- name: Upgrade packages
- package: "name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
+
+# Master package upgrade ends up depending on node and sdn packages, we need to be explicit
+# with all versions to avoid yum from accidentally jumping to something newer than intended:
+- name: Upgrade master packages
+ package: name={{ item }} state=present
+ when: component == "master"
+ with_items:
+ - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
+
+- name: Upgrade node packages
+ package: name={{ item }} state=present
+ when: component == "node"
+ with_items:
+ - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
- name: Ensure python-yaml present for config upgrade
package: name=PyYAML state=present
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index babb7191d..c6e799261 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -64,6 +64,7 @@
static: yes
roles:
- openshift_facts
+ - lib_utils
post_tasks:
# Run the pre-upgrade hook if defined:
@@ -113,6 +114,13 @@
state: link
when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
+ - name: Update oreg value
+ yedit:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ key: 'imageConfig.format'
+ value: "{{ oreg_url }}"
+ when: oreg_url is defined
+
# Run the upgrade hook prior to restarting services/system if defined:
- debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
@@ -262,7 +270,7 @@
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- name: Mark node unschedulable
- oadm_manage_node:
+ oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
schedulable: False
delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -284,7 +292,7 @@
post_tasks:
- name: Set node schedulability
- oadm_manage_node:
+ oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
schedulable: True
delegate_to: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 4e1838c71..e9f894942 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -15,7 +15,7 @@
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- name: Mark node unschedulable
- oadm_manage_node:
+ oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
schedulable: False
delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -37,7 +37,7 @@
post_tasks:
- name: Set node schedulability
- oadm_manage_node:
+ oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
schedulable: True
delegate_to: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
index 9c126033c..ae63c9ca9 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
@@ -35,7 +35,7 @@
kind: petsets
register: l_do_petsets_exist
- - name: FAIL ON Resource migration 'PetSets' unsupported
+ - name: Fail on unsupported resource migration 'PetSets'
fail:
msg: >
PetSet objects were detected in your cluster. These are an
@@ -59,9 +59,9 @@
migrating to StatefulSets, run this command as a user with
cluster-admin privileges:
- $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascale=false
+ $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascade=false
when:
# Search did not fail, valid resource type found
- - l_do_petsets_exist.results.returncode == "0"
+ - l_do_petsets_exist.results.returncode == 0
# Items do exist in the search results
- l_do_petsets_exist.results.results.0['items'] | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins
new file mode 120000
index 000000000..7de3c1dd7
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/roles b/playbooks/common/openshift-cluster/upgrades/v3_6/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/roles
@@ -0,0 +1 @@
+../../../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml
new file mode 100644
index 000000000..48c69eccd
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml
@@ -0,0 +1,18 @@
+---
+###############################################################################
+# Post upgrade - Upgrade job storage
+###############################################################################
+- name: Upgrade job storage
+ hosts: oo_first_master
+ roles:
+ - { role: openshift_cli }
+ vars:
+ # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
+ # restart.
+ skip_docker_role: True
+ tasks:
+ - name: Upgrade job storage
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ migrate storage --include=jobs --confirm
+ run_once: true
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml
new file mode 100644
index 000000000..ac5704f69
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml
@@ -0,0 +1,10 @@
+---
+###############################################################################
+# Pre upgrade checks for known data problems, if this playbook fails you should
+# contact support. If you're not supported contact users@lists.openshift.com
+###############################################################################
+- name: Verify 3.6 specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+ tasks: []
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 7a334e771..60cf56108 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -48,12 +48,6 @@
- set_fact:
openshift_hosted_metrics_resolution: "{{ lookup('oo_option', 'openshift_hosted_metrics_resolution') | default('10s', true) }}"
when: openshift_hosted_metrics_resolution is not defined
- - set_fact:
- openshift_hosted_metrics_deployer_prefix: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_prefix') | default('openshift') }}"
- when: openshift_hosted_metrics_deployer_prefix is not defined
- - set_fact:
- openshift_hosted_metrics_deployer_version: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_version') | default('latest') }}"
- when: openshift_hosted_metrics_deployer_version is not defined
roles:
- openshift_facts
post_tasks:
@@ -127,6 +121,10 @@
etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
etcd_cert_prefix: "master.etcd-"
+ - role: nuage_master
+ when: openshift.common.use_nuage | bool
+ - role: calico_master
+ when: openshift.common.use_calico | bool
post_tasks:
- name: Create group for deployment type
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index 18e5c665f..92f16dc47 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -60,8 +60,16 @@
- openshift_facts
- openshift_docker
+- include: ../openshift-cluster/disable_excluder.yml
+ tags:
+ - always
+
- include: ../openshift-master/config.yml
- include: ../openshift-loadbalancer/config.yml
- include: ../openshift-node/config.yml
+
+- include: ../openshift-cluster/reset_excluder.yml
+ tags:
+ - always
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 6c5a299c1..792ffb4e2 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -82,6 +82,8 @@
etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
when: openshift.common.use_flannel | bool
+ - role: calico
+ when: openshift.common.use_calico | bool
- role: nuage_node
when: openshift.common.use_nuage | bool
- role: contiv
diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml
index bb3b1e780..c31aca62b 100644
--- a/playbooks/common/openshift-node/scaleup.yml
+++ b/playbooks/common/openshift-node/scaleup.yml
@@ -27,4 +27,12 @@
- openshift_facts
- openshift_docker
+- include: ../openshift-cluster/disable_excluder.yml
+ tags:
+ - always
+
- include: ../openshift-node/config.yml
+
+- include: ../openshift-cluster/reset_excluder.yml
+ tags:
+ - always
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index 8e46c5919..2625d4d05 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -32,4 +32,5 @@
openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
+ openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index 44b0f5a3c..f782d6dab 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -33,5 +33,6 @@
openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
+ openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
openshift_use_dnsmasq: false
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
index 1366c83ca..f9ddb9469 100644
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ b/playbooks/openstack/openshift-cluster/config.yml
@@ -29,4 +29,5 @@
openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
+ openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}"
openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index 20ce47c07..82329eac1 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -340,16 +340,6 @@ resources:
port_range_max: 10250
remote_mode: remote_group_id
- direction: ingress
- protocol: tcp
- port_range_min: 10255
- port_range_max: 10255
- remote_mode: remote_group_id
- - direction: ingress
- protocol: udp
- port_range_min: 10255
- port_range_max: 10255
- remote_mode: remote_group_id
- - direction: ingress
protocol: udp
port_range_min: 4789
port_range_max: 4789
diff --git a/pytest.ini b/pytest.ini
index 502fd1f46..1b0d19bb2 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -9,6 +9,7 @@ python_files =
# is Python unittest's default, while pytest discovers both "test_*.py" and
# "*_test.py" by default.
test_*.py
+ *_test.py
*_tests.py
addopts =
--cov=.
diff --git a/roles/calico/README.md b/roles/calico/README.md
new file mode 100644
index 000000000..99e870521
--- /dev/null
+++ b/roles/calico/README.md
@@ -0,0 +1,28 @@
+# Calico
+
+Configure Calico components for the Master host.
+
+## Requirements
+
+* Ansible 2.2
+
+## Warning: This Calico Integration is in Alpha
+
+Calico shares the etcd instance used by OpenShift, and distributes client etcd certificates to each node.
+For this reason, **we do not (yet) recommend running Calico on any production-like
+cluster, or using it for any purpose besides early access testing.**
+
+## Installation
+
+To install, set the following inventory configuration parameters:
+
+* `openshift_use_calico=True`
+* `openshift_use_openshift_sdn=False`
+* `os_sdn_network_plugin_name='cni'`
+
+
+### Contact Information
+
+Author: Dan Osborne <dan@projectcalico.org>
+
+For support, join the `#openshift` channel on the [calico users slack](calicousers.slack.com).
diff --git a/roles/calico/defaults/main.yaml b/roles/calico/defaults/main.yaml
new file mode 100644
index 000000000..a81fc3af7
--- /dev/null
+++ b/roles/calico/defaults/main.yaml
@@ -0,0 +1,10 @@
+---
+kubeconfig: "{{openshift.common.config_base}}/node/{{ 'system:node:' + openshift.common.hostname }}.kubeconfig"
+etcd_endpoints: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls | join(',') }}"
+
+cni_conf_dir: "/etc/cni/net.d/"
+cni_bin_dir: "/opt/cni/bin/"
+
+calico_etcd_ca_cert_file: "/etc/origin/calico/calico.etcd-ca.crt"
+calico_etcd_cert_file: "/etc/origin/calico/calico.etcd-client.crt"
+calico_etcd_key_file: "/etc/origin/calico/calico.etcd-client.key"
diff --git a/roles/calico/handlers/main.yml b/roles/calico/handlers/main.yml
new file mode 100644
index 000000000..65d75cf00
--- /dev/null
+++ b/roles/calico/handlers/main.yml
@@ -0,0 +1,8 @@
+---
+- name: restart calico
+ become: yes
+ systemd: name=calico state=restarted
+
+- name: restart docker
+ become: yes
+ systemd: name=docker state=restarted
diff --git a/roles/calico/meta/main.yml b/roles/calico/meta/main.yml
new file mode 100644
index 000000000..102b82bde
--- /dev/null
+++ b/roles/calico/meta/main.yml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: Dan Osborne
+ description: Calico networking
+ company: Tigera, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- role: openshift_facts
diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml
new file mode 100644
index 000000000..287fed321
--- /dev/null
+++ b/roles/calico/tasks/main.yml
@@ -0,0 +1,74 @@
+---
+- include: ../../../roles/etcd_client_certificates/tasks/main.yml
+ vars:
+ etcd_cert_prefix: calico.etcd-
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/calico"
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_cert_subdir: "openshift-calico-{{ openshift.common.hostname }}"
+
+- name: Assure the calico certs have been generated
+ stat:
+ path: "{{ item }}"
+ with_items:
+ - "{{ calico_etcd_ca_cert_file }}"
+ - "{{ calico_etcd_cert_file}}"
+ - "{{ calico_etcd_key_file }}"
+
+- name: Configure Calico service unit file
+ template:
+ dest: "/lib/systemd/system/calico.service"
+ src: calico.service.j2
+
+- name: Enable calico
+ become: yes
+ systemd:
+ name: calico
+ daemon_reload: yes
+ state: started
+ enabled: yes
+ register: start_result
+
+- name: Assure CNI conf dir exists
+ become: yes
+ file: path="{{ cni_conf_dir }}" state=directory
+
+- name: Generate Calico CNI config
+ become: yes
+ template:
+ src: "calico.conf.j2"
+ dest: "{{ cni_conf_dir }}/10-calico.conf"
+
+- name: Assures Kuberentes CNI bin dir exists
+ become: yes
+ file: path="{{ cni_bin_dir }}" state=directory
+
+- name: Download Calico CNI Plugin
+ become: yes
+ get_url:
+ url: https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico
+ dest: "{{ cni_bin_dir }}"
+ mode: a+x
+
+- name: Download Calico IPAM Plugin
+ become: yes
+ get_url:
+ url: https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico-ipam
+ dest: "{{ cni_bin_dir }}"
+ mode: a+x
+
+- name: Download and unzip standard CNI plugins
+ become: yes
+ unarchive:
+ remote_src: True
+ src: https://github.com/containernetworking/cni/releases/download/v0.4.0/cni-amd64-v0.4.0.tgz
+ dest: "{{ cni_bin_dir }}"
+
+- name: Assure Calico conf dir exists
+ become: yes
+ file: path=/etc/calico/ state=directory
+
+- name: Set calicoctl.cfg
+ template:
+ src: calico.cfg.j2
+ dest: "/etc/calico/calicoctl.cfg"
diff --git a/roles/calico/templates/calico.cfg.j2 b/roles/calico/templates/calico.cfg.j2
new file mode 100644
index 000000000..722385ed8
--- /dev/null
+++ b/roles/calico/templates/calico.cfg.j2
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: calicoApiConfig
+metadata:
+spec:
+ datastoreType: "etcdv2"
+ etcdEndpoints: "{{ etcd_endpoints }}"
+ etcdKeyFile: "{{ calico_etcd_key_file }}"
+ etcdCertFile: "{{ calico_etcd_cert_file }}"
+ etcdCaCertFile: "{{ calico_etcd_ca_cert_file }}"
diff --git a/roles/calico/templates/calico.conf.j2 b/roles/calico/templates/calico.conf.j2
new file mode 100644
index 000000000..3c8c6b046
--- /dev/null
+++ b/roles/calico/templates/calico.conf.j2
@@ -0,0 +1,18 @@
+{
+ "name": "calico",
+ "type": "calico",
+ "ipam": {
+ "type": "calico-ipam"
+ },
+ "etcd_endpoints": "{{ etcd_endpoints }}",
+ "etcd_key_file": "{{ calico_etcd_key_file }}",
+ "etcd_cert_file": "{{ calico_etcd_cert_file }}",
+ "etcd_ca_cert_file": "{{ calico_etcd_ca_cert_file }}",
+ "kubernetes": {
+ "kubeconfig": "{{ kubeconfig }}"
+ },
+ "hostname": "{{ openshift.common.hostname }}",
+ "policy": {
+ "type": "k8s"
+ }
+}
diff --git a/roles/calico/templates/calico.service.j2 b/roles/calico/templates/calico.service.j2
new file mode 100644
index 000000000..b882a5597
--- /dev/null
+++ b/roles/calico/templates/calico.service.j2
@@ -0,0 +1,29 @@
+[Unit]
+Description=calico
+After=docker.service
+Requires=docker.service
+
+[Service]
+Restart=always
+ExecStartPre=-/usr/bin/docker rm -f calico-node
+ExecStart=/usr/bin/docker run --net=host --privileged \
+ --name=calico-node \
+ -e WAIT_FOR_DATASTORE=true \
+ -e FELIX_DEFAULTENDPOINTTOHOSTACTION=ACCEPT \
+ -e CALICO_IPV4POOL_IPIP=always \
+ -e FELIX_IPV6SUPPORT=false \
+ -e ETCD_ENDPOINTS={{ etcd_endpoints }} \
+ -v /etc/origin/calico:/etc/origin/calico \
+ -e ETCD_CA_CERT_FILE={{ calico_etcd_ca_cert_file }} \
+ -e ETCD_CERT_FILE={{ calico_etcd_cert_file }} \
+ -e ETCD_KEY_FILE={{ calico_etcd_key_file }} \
+ -e NODENAME={{ openshift.common.hostname }} \
+ -v /var/log/calico:/var/log/calico \
+ -v /lib/modules:/lib/modules \
+ -v /var/run/calico:/var/run/calico \
+ calico/node:v1.1.0
+
+ExecStop=-/usr/bin/docker stop calico-node
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/calico_master/README.md b/roles/calico_master/README.md
new file mode 100644
index 000000000..2d34a967c
--- /dev/null
+++ b/roles/calico_master/README.md
@@ -0,0 +1,28 @@
+# Calico (Master)
+
+Configure Calico components for the Master host.
+
+## Requirements
+
+* Ansible 2.2
+
+## Warning: This Calico Integration is in Alpha
+
+Calico shares the etcd instance used by OpenShift, and distributes client etcd certificates to each node.
+For this reason, **we do not (yet) recommend running Calico on any production-like
+cluster, or using it for any purpose besides early access testing.**
+
+## Installation
+
+To install, set the following inventory configuration parameters:
+
+* `openshift_use_calico=True`
+* `openshift_use_openshift_sdn=False`
+* `os_sdn_network_plugin_name='cni'`
+
+
+### Contact Information
+
+Author: Dan Osborne <dan@projectcalico.org>
+
+For support, join the `#openshift` channel on the [calico users slack](calicousers.slack.com).
diff --git a/roles/calico_master/defaults/main.yaml b/roles/calico_master/defaults/main.yaml
new file mode 100644
index 000000000..db0d17884
--- /dev/null
+++ b/roles/calico_master/defaults/main.yaml
@@ -0,0 +1,2 @@
+---
+kubeconfig: "{{ openshift.common.config_base }}/master/openshift-master.kubeconfig"
diff --git a/roles/calico_master/meta/main.yml b/roles/calico_master/meta/main.yml
new file mode 100644
index 000000000..4d70c79cf
--- /dev/null
+++ b/roles/calico_master/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+ author: Dan Osborne
+ description: Calico networking
+ company: Tigera, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- role: calico
+- role: openshift_facts
diff --git a/roles/calico_master/tasks/main.yml b/roles/calico_master/tasks/main.yml
new file mode 100644
index 000000000..3358abe23
--- /dev/null
+++ b/roles/calico_master/tasks/main.yml
@@ -0,0 +1,41 @@
+---
+- name: Assure the calico certs have been generated
+ stat:
+ path: "{{ item }}"
+ with_items:
+ - "{{ calico_etcd_ca_cert_file }}"
+ - "{{ calico_etcd_cert_file}}"
+ - "{{ calico_etcd_key_file }}"
+
+- name: Create temp directory for policy controller definition
+ command: mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: mktemp
+ changed_when: False
+
+- name: Write Calico Policy Controller definition
+ template:
+ dest: "{{ mktemp.stdout }}/calico-policy-controller.yml"
+ src: calico-policy-controller.yml.j2
+
+- name: Launch Calico Policy Controller
+ command: >
+ {{ openshift.common.client_binary }} create
+ -f {{ mktemp.stdout }}/calico-policy-controller.yml
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ register: calico_create_output
+ failed_when: ('already exists' not in calico_create_output.stderr) and ('created' not in calico_create_output.stdout)
+ changed_when: ('created' in calico_create_output.stdout)
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
+
+
+- name: oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:calico
+ oc_adm_policy_user:
+ user: system:serviceaccount:kube-system:calico
+ resource_kind: scc
+ resource_name: privileged
+ state: present
diff --git a/roles/calico_master/templates/calico-policy-controller.yml.j2 b/roles/calico_master/templates/calico-policy-controller.yml.j2
new file mode 100644
index 000000000..3fb1abf0d
--- /dev/null
+++ b/roles/calico_master/templates/calico-policy-controller.yml.j2
@@ -0,0 +1,105 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico
+ namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: v1
+metadata:
+ name: calico
+ namespace: kube-system
+rules:
+ - apiGroups: [""]
+ resources:
+ - pods
+ - namespaces
+ verbs:
+ - list
+ - get
+ - watch
+ - apiGroups: ["extensions"]
+ resources:
+ - networkpolicies
+ verbs:
+ - list
+ - get
+ - watch
+---
+apiVersion: v1
+kind: ClusterRoleBinding
+metadata:
+ name: calico
+roleRef:
+ name: calico
+subjects:
+- kind: SystemUser
+ name: kube-system:calico
+- kind: ServiceAccount
+ name: calico
+ namespace: kube-system
+userNames:
+ - system:serviceaccount:kube-system:calico
+---
+# This manifest deploys the Calico policy controller on Kubernetes.
+# See https://github.com/projectcalico/k8s-policy
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: calico-policy-controller
+ namespace: kube-system
+ labels:
+ k8s-app: calico-policy
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ scheduler.alpha.kubernetes.io/tolerations: |
+ [{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
+ {"key":"CriticalAddonsOnly", "operator":"Exists"}]
+spec:
+ # The policy controller can only have a single active instance.
+ replicas: 1
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: calico-policy-controller
+ namespace: kube-system
+ labels:
+ k8s-app: calico-policy
+ spec:
+ # The policy controller must run in the host network namespace so that
+ # it isn't governed by policy that would prevent it from working.
+ hostNetwork: true
+ serviceAccountName: calico
+ containers:
+ - name: calico-policy-controller
+ image: quay.io/calico/kube-policy-controller:v0.5.4
+ env:
+ # The location of the Calico etcd cluster.
+ - name: ETCD_ENDPOINTS
+ value: {{ etcd_endpoints }}
+ # Location of the CA certificate for etcd.
+ - name: ETCD_CA_CERT_FILE
+ value: {{ calico_etcd_ca_cert_file }}
+ # Location of the client key for etcd.
+ - name: ETCD_KEY_FILE
+ value: {{ calico_etcd_key_file }}
+ # Location of the client certificate for etcd.
+ - name: ETCD_CERT_FILE
+ value: {{ calico_etcd_cert_file }}
+ # Since we're running in the host namespace and might not have KubeDNS
+ # access, configure the container's /etc/hosts to resolve
+ # kubernetes.default to the correct service clusterIP.
+ - name: CONFIGURE_ETC_HOSTS
+ value: "true"
+ volumeMounts:
+ # Mount in the etcd TLS secrets.
+ - name: certs
+ mountPath: /etc/origin/calico
+
+ volumes:
+ # Mount in the etcd TLS secrets.
+ - name: certs
+ hostPath:
+ path: /etc/origin/calico
diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml
index 8bd68787a..0114498f8 100644
--- a/roles/cockpit-ui/tasks/main.yml
+++ b/roles/cockpit-ui/tasks/main.yml
@@ -1,13 +1,16 @@
---
- block:
- - name: Create passthrough route for docker-registry
+
+ # When openshift_hosted_manage_registry=true the openshift_hosted
+ # role will create the appropriate route for the docker-registry.
+ # When openshift_hosted_manage_registry=false then this code will
+ # not be run.
+ - name: fetch the docker-registry route
oc_route:
kubeconfig: "{{ openshift_master_config_dir }}/admin.kubeconfig"
name: docker-registry
namespace: default
- service_name: docker-registry
- state: present
- tls_termination: passthrough
+ state: list
register: docker_registry_route
- name: Create passthrough route for registry-console
@@ -41,7 +44,7 @@
{% if openshift_cockpit_deployer_prefix is defined %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}
{% if openshift_cockpit_deployer_version is defined %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %}
-p OPENSHIFT_OAUTH_PROVIDER_URL="{{ openshift.master.public_api_url }}"
- -p REGISTRY_HOST="{{ docker_registry_route.results.results[0].spec.host }}"
+ -p REGISTRY_HOST="{{ docker_registry_route.results[0].spec.host }}"
-p COCKPIT_KUBE_URL="https://{{ registry_console_cockpit_kube.results.results[0].spec.host }}"
--config={{ openshift_hosted_kubeconfig }}
-n default
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index 532f9e313..e0c70a181 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -16,6 +16,7 @@ galaxy_info:
- cloud
- system
dependencies:
+- role: lib_openshift
- role: os_firewall
os_firewall_allow:
- service: etcd
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index 3b80164cc..72ffadbd2 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -1,8 +1,4 @@
---
-- name: Load lib_openshift modules
- include_role:
- name: lib_openshift
-
- name: Pull etcd system container
command: atomic pull --storage=ostree {{ openshift.etcd.etcd_image }}
register: pull_result
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index 990a86c21..9151dd0bd 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -60,3 +60,9 @@ ETCD_PEER_CA_FILE={{ etcd_peer_ca_file }}
ETCD_PEER_CERT_FILE={{ etcd_peer_cert_file }}
ETCD_PEER_KEY_FILE={{ etcd_peer_key_file }}
{% endif -%}
+
+#[logging]
+ETCD_DEBUG="{{ etcd_debug | default(false) | string }}"
+{% if etcd_log_package_levels is defined %}
+ETCD_LOG_PACKAGE_LEVELS="{{ etcd_log_package_levels }}"
+{% endif %}
diff --git a/roles/etcd_client_certificates/tasks/main.yml b/roles/etcd_client_certificates/tasks/main.yml
index 93f4fd53c..450b65209 100644
--- a/roles/etcd_client_certificates/tasks/main.yml
+++ b/roles/etcd_client_certificates/tasks/main.yml
@@ -51,7 +51,7 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ etcd_cert_subdir ~ '/'
~ etcd_cert_prefix ~ 'client.csr' }}"
environment:
- SAN: "IP:{{ etcd_ip }}"
+ SAN: "IP:{{ etcd_ip }},DNS:{{ etcd_hostname }}"
when: etcd_client_certs_missing | bool
delegate_to: "{{ etcd_ca_host }}"
diff --git a/roles/etcd_server_certificates/meta/main.yml b/roles/etcd_server_certificates/meta/main.yml
index b453f2bd8..98c913dba 100644
--- a/roles/etcd_server_certificates/meta/main.yml
+++ b/roles/etcd_server_certificates/meta/main.yml
@@ -13,4 +13,4 @@ galaxy_info:
- cloud
- system
dependencies:
-- role: etcd_ca
+- role: openshift_etcd_ca
diff --git a/roles/etcd_server_certificates/tasks/main.yml b/roles/etcd_server_certificates/tasks/main.yml
index 4ae9b79c4..956f5cc55 100644
--- a/roles/etcd_server_certificates/tasks/main.yml
+++ b/roles/etcd_server_certificates/tasks/main.yml
@@ -40,7 +40,7 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ etcd_cert_subdir ~ '/'
~ etcd_cert_prefix ~ 'server.csr' }}"
environment:
- SAN: "IP:{{ etcd_ip }}"
+ SAN: "IP:{{ etcd_ip }},DNS:{{ etcd_hostname }}"
when: etcd_server_certs_missing | bool
delegate_to: "{{ etcd_ca_host }}"
@@ -73,7 +73,7 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ etcd_cert_subdir ~ '/'
~ etcd_cert_prefix ~ 'peer.csr' }}"
environment:
- SAN: "IP:{{ etcd_ip }}"
+ SAN: "IP:{{ etcd_ip }},DNS:{{ etcd_hostname }}"
when: etcd_server_certs_missing | bool
delegate_to: "{{ etcd_ca_host }}"
diff --git a/roles/kube_nfs_volumes/README.md b/roles/kube_nfs_volumes/README.md
deleted file mode 100644
index 8cf7c0cd4..000000000
--- a/roles/kube_nfs_volumes/README.md
+++ /dev/null
@@ -1,114 +0,0 @@
-# kube_nfs_volumes
-
-This role is useful to export disks as set of Kubernetes persistent volumes.
-It does so by partitioning the disks, creating ext4 filesystem on each
-partition, mounting the partitions, exporting the mounts via NFS and adding
-these NFS shares as NFS persistent volumes to existing Kubernetes installation.
-
-All partitions on given disks are used as the persistent volumes, including
-already existing partitions! There should be no other data (such as operating
-system) on the disks!
-
-## Requirements
-
-* Ansible 2.2
-* Running Kubernetes with NFS persistent volume support (on a remote machine).
-* Works only on RHEL/Fedora-like distros.
-
-## Role Variables
-
-```
-# Options of NFS exports.
-nfs_export_options: "*(rw,no_root_squash,insecure,no_subtree_check)"
-
-# Directory, where the created partitions should be mounted. They will be
-# mounted as <mount_dir>/sda1 etc.
-mount_dir: /exports
-
-# Comma-separated list of disks to partition.
-# This role always assumes that all partitions on these disks are used as
-# physical volumes.
-disks: /dev/sdb,/dev/sdc
-
-# Whether to re-partition already partitioned disks.
-# Even though the disks won't get repartitioned on 'false', all existing
-# partitions on the disk are exported via NFS as physical volumes!
-force: false
-
-# Specification of size of partitions to create. See library/partitionpool.py
-# for details.
-sizes: 100M
-
-# URL of Kubernetes API server, incl. port.
-kubernetes_url: https://10.245.1.2:6443
-
-# Token to use for authentication to the API server
-kubernetes_token: tJdce6Fn3cL1112YoIJ5m2exzAbzcPZX
-
-# API Version to use for kubernetes
-kube_api_version: v1
-```
-
-## Dependencies
-
-None
-
-## Example Playbook
-
-With this playbook, `/dev/sdb` is partitioned into 100MiB partitions, all of
-them are mounted into `/exports/sdb<N>` directory and all these directories
-are exported via NFS and added as physical volumes to Kubernetes running at
-`https://10.245.1.2:6443`.
-
- - hosts: servers
- roles:
- - role: kube_nfs_volumes
- disks: "/dev/sdb"
- sizes: 100M
- kubernetes_url: https://10.245.1.2:6443
- kubernetes_token: tJdce6Fn3cL1112YoIJ5m2exzAbzcPZX
-
-See library/partitionpool.py for details how `sizes` parameter can be used
-to create partitions of various sizes.
-
-## Full example
-Let's say there are two machines, 10.0.0.1 and 10.0.0.2, that we want to use as
-NFS servers for our Kubernetes cluster, running Kubernetes public API at
-https://10.245.1.2:6443.
-
-Both servers have three 1 TB disks, /dev/sda for the system and /dev/sdb and
-/dev/sdc to be partitioned. We want to split the data disks into 5, 10 and
-20 GiB partitions so that 10% of the total capacity is in 5 GiB partitions, 40%
-in 10 GiB and 50% in 20 GiB partitions.
-
-That means, each data disk will have 20x 5 GiB, 40x 10 GiB and 25x 20 GiB
-partitions.
-
-* Create an `inventory` file:
- ```
- [nfsservers]
- 10.0.0.1
- 10.0.0.2
- ```
-
-* Create an ansible playbook, say `setupnfs.yaml`:
- ```
- - hosts: nfsservers
- become: yes
- roles:
- - role: kube_nfs_volumes
- disks: "/dev/sdb,/dev/sdc"
- sizes: 5G:10,10G:40,20G:50
- force: no
- kubernetes_url: https://10.245.1.2:6443
- kubernetes_token: tJdce6Fn3cL1112YoIJ5m2exzAbzcPZX
- ```
-
-* Run the playbook:
- ```
- ansible-playbook -i inventory setupnfs.yml
- ```
-
-## License
-
-Apache 2.0
diff --git a/roles/kube_nfs_volumes/defaults/main.yml b/roles/kube_nfs_volumes/defaults/main.yml
deleted file mode 100644
index bdd994d07..000000000
--- a/roles/kube_nfs_volumes/defaults/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-kubernetes_url: https://172.30.0.1:443
-
-kube_api_version: v1
-
-kube_req_template: "../templates/{{ kube_api_version }}/nfs.json.j2"
-
-# Options of NFS exports.
-nfs_export_options: "*(rw,no_root_squash,insecure,no_subtree_check)"
-
-# Directory, where the created partitions should be mounted. They will be
-# mounted as <mount_dir>/sda1 etc.
-mount_dir: /exports
-
-# Force re-partitioning the disks
-force: false
diff --git a/roles/kube_nfs_volumes/handlers/main.yml b/roles/kube_nfs_volumes/handlers/main.yml
deleted file mode 100644
index 9ce8b783d..000000000
--- a/roles/kube_nfs_volumes/handlers/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: restart nfs
- systemd: name=nfs-server state=restarted
diff --git a/roles/kube_nfs_volumes/library/partitionpool.py b/roles/kube_nfs_volumes/library/partitionpool.py
deleted file mode 100644
index 1857433c7..000000000
--- a/roles/kube_nfs_volumes/library/partitionpool.py
+++ /dev/null
@@ -1,247 +0,0 @@
-#!/usr/bin/python
-"""
-Ansible module for partitioning.
-"""
-
-from __future__ import print_function
-
-# There is no pyparted on our Jenkins worker
-# pylint: disable=import-error
-import parted
-
-DOCUMENTATION = """
----
-module: partitionpool
-short_description; Partition a disk into parititions.
-description:
- - Creates partitions on given disk based on partition sizes and their weights.
- Unless 'force' option is set to True, it ignores already partitioned disks.
-
- When the disk is empty or 'force' is set to True, it always creates a new
- GPT partition table on the disk. Then it creates number of partitions, based
- on their weights.
-
- This module should be used when a system admin wants to split existing disk(s)
- into pools of partitions of specific sizes. It is not intended as generic disk
- partitioning module!
-
- Independent on 'force' parameter value and actual disk state, the task
- always fills 'partition_pool' fact with all partitions on given disks,
- together with their sizes (in bytes). E.g.:
- partition_sizes = [
- { name: sda1, Size: 1048576000 },
- { name: sda2, Size: 1048576000 },
- { name: sdb1, Size: 1048576000 },
- ...
- ]
-
-options:
- disk:
- description:
- - Disk to partition.
- size:
- description:
- - Sizes of partitions to create and their weights. Has form of:
- <size1>[:<weigth1>][,<size2>[:<weight2>][,...]]
- - Any <size> can end with 'm' or 'M' for megabyte, 'g/G' for gigabyte
- and 't/T' for terabyte. Megabyte is used when no unit is specified.
- - If <weight> is missing, 1.0 is used.
- - From each specified partition <sizeX>, number of these partitions are
- created so they occupy spaces represented by <weightX>, proportionally to
- other weights.
-
- - Example 1: size=100G says, that the whole disk is split in number of 100 GiB
- partitions. On 1 TiB disk, 10 partitions will be created.
-
- - Example 2: size=100G:1,10G:1 says that ratio of space occupied by 100 GiB
- partitions and 10 GiB partitions is 1:1. Therefore, on 1 TiB disk, 500 GiB
- will be split into five 100 GiB partition and 500 GiB will be split into fifty
- 10GiB partitions.
- - size=100G:1,10G:1 = 5x 100 GiB and 50x 10 GiB partitions (on 1 TiB disk).
-
- - Example 3: size=200G:1,100G:2 says that the ratio of space occupied by 200 GiB
- partitions and 100GiB partition is 1:2. Therefore, on 1 TiB disk, 1/3
- (300 GiB) should be occupied by 200 GiB partitions. Only one fits there,
- so only one is created (we always round nr. of partitions *down*). The rest
- (800 GiB) is split into eight 100 GiB partitions, even though it's more
- than 2/3 of total space - free space is always allocated as much as possible.
- - size=200G:1,100G:2 = 1x 200 GiB and 8x 100 GiB partitions (on 1 TiB disk).
-
- - Example: size=200G:1,100G:1,50G:1 says that the ratio of space occupied by
- 200 GiB, 100 GiB and 50 GiB partitions is 1:1:1. Therefore 1/3 of 1 TiB disk
- is dedicated to 200 GiB partitions. Only one fits there and only one is
- created. The rest (800 GiB) is distributed according to remaining weights:
- 100 GiB vs 50 GiB is 1:1, we create four 100 GiB partitions (400 GiB in total)
- and eight 50 GiB partitions (again, 400 GiB).
- - size=200G:1,100G:1,50G:1 = 1x 200 GiB, 4x 100 GiB and 8x 50 GiB partitions
- (on 1 TiB disk).
-
- force:
- description:
- - If True, it will always overwite partition table on the disk and create new one.
- - If False (default), it won't change existing partition tables.
-
-"""
-
-
-# It's not class, it's more a simple struct with almost no functionality.
-# pylint: disable=too-few-public-methods
-class PartitionSpec(object):
- """ Simple class to represent required partitions."""
- def __init__(self, size, weight):
- """ Initialize the partition specifications."""
- # Size of the partitions
- self.size = size
- # Relative weight of this request
- self.weight = weight
- # Number of partitions to create, will be calculated later
- self.count = -1
-
- def set_count(self, count):
- """ Set count of parititions of this specification. """
- self.count = count
-
-
-def assign_space(total_size, specs):
- """
- Satisfy all the PartitionSpecs according to their weight.
- In other words, calculate spec.count of all the specs.
- """
- total_weight = 0.0
- for spec in specs:
- total_weight += float(spec.weight)
-
- for spec in specs:
- num_blocks = int((float(spec.weight) / total_weight) * (total_size / float(spec.size)))
- spec.set_count(num_blocks)
- total_size -= num_blocks * spec.size
- total_weight -= spec.weight
-
-
-def partition(diskname, specs, force=False, check_mode=False):
- """
- Create requested partitions.
- Returns nr. of created partitions or 0 when the disk was already partitioned.
- """
- count = 0
-
- dev = parted.getDevice(diskname)
- try:
- disk = parted.newDisk(dev)
- except parted.DiskException:
- # unrecognizable format, treat as empty disk
- disk = None
-
- if disk and len(disk.partitions) > 0 and not force:
- print("skipping", diskname)
- return 0
-
- # create new partition table, wiping all existing data
- disk = parted.freshDisk(dev, 'gpt')
- # calculate nr. of partitions of each size
- assign_space(dev.getSize(), specs)
- last_megabyte = 1
- for spec in specs:
- for _ in range(spec.count):
- # create the partition
- start = parted.sizeToSectors(last_megabyte, "MiB", dev.sectorSize)
- length = parted.sizeToSectors(spec.size, "MiB", dev.sectorSize)
- geo = parted.Geometry(device=dev, start=start, length=length)
- filesystem = parted.FileSystem(type='ext4', geometry=geo)
- part = parted.Partition(
- disk=disk,
- type=parted.PARTITION_NORMAL,
- fs=filesystem,
- geometry=geo)
- disk.addPartition(partition=part, constraint=dev.optimalAlignedConstraint)
- last_megabyte += spec.size
- count += 1
- try:
- if not check_mode:
- disk.commit()
- except parted.IOException:
- # partitions have been written, but we have been unable to inform the
- # kernel of the change, probably because they are in use.
- # Ignore it and hope for the best...
- pass
- return count
-
-
-def parse_spec(text):
- """ Parse string with partition specification. """
- tokens = text.split(",")
- specs = []
- for token in tokens:
- if ":" not in token:
- token += ":1"
-
- (sizespec, weight) = token.split(':')
- weight = float(weight) # throws exception with reasonable error string
-
- units = {"m": 1, "g": 1 << 10, "t": 1 << 20, "p": 1 << 30}
- unit = units.get(sizespec[-1].lower(), None)
- if not unit:
- # there is no unit specifier, it must be just the number
- size = float(sizespec)
- unit = 1
- else:
- size = float(sizespec[:-1])
- spec = PartitionSpec(int(size * unit), weight)
- specs.append(spec)
- return specs
-
-
-def get_partitions(diskpath):
- """ Return array of partition names for given disk """
- dev = parted.getDevice(diskpath)
- disk = parted.newDisk(dev)
- partitions = []
- for part in disk.partitions:
- (_, _, pname) = part.path.rsplit("/")
- partitions.append({"name": pname, "size": part.getLength() * dev.sectorSize})
-
- return partitions
-
-
-def main():
- """ Ansible module main method. """
- module = AnsibleModule( # noqa: F405
- argument_spec=dict(
- disks=dict(required=True, type='str'),
- force=dict(required=False, default="no", type='bool'),
- sizes=dict(required=True, type='str')
- ),
- supports_check_mode=True,
- )
-
- disks = module.params['disks']
- force = module.params['force']
- if force is None:
- force = False
- sizes = module.params['sizes']
-
- try:
- specs = parse_spec(sizes)
- except ValueError as ex:
- err = "Error parsing sizes=" + sizes + ": " + str(ex)
- module.fail_json(msg=err)
-
- partitions = []
- changed_count = 0
- for disk in disks.split(","):
- try:
- changed_count += partition(disk, specs, force, module.check_mode)
- except Exception as ex:
- err = "Error creating partitions on " + disk + ": " + str(ex)
- raise
- # module.fail_json(msg=err)
- partitions += get_partitions(disk)
-
- module.exit_json(changed=(changed_count > 0), ansible_facts={"partition_pool": partitions})
-
-
-# ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, wrong-import-order, wrong-import-position
-# import module snippets
-from ansible.module_utils.basic import * # noqa: E402,F403
-main()
diff --git a/roles/kube_nfs_volumes/meta/main.yml b/roles/kube_nfs_volumes/meta/main.yml
deleted file mode 100644
index 7ed028138..000000000
--- a/roles/kube_nfs_volumes/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-galaxy_info:
- author: Jan Safranek
- description: Partition disks and use them as Kubernetes NFS physical volumes.
- company: Red Hat, Inc.
- license: license (Apache)
- min_ansible_version: 2.2
- platforms:
- - name: EL
- versions:
- - 7
- - name: Fedora
- versions:
- - all
- categories:
- - cloud
-dependencies: []
diff --git a/roles/kube_nfs_volumes/tasks/main.yml b/roles/kube_nfs_volumes/tasks/main.yml
deleted file mode 100644
index 67f709c8c..000000000
--- a/roles/kube_nfs_volumes/tasks/main.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- fail:
- msg: "This role is not yet supported on atomic hosts"
- when: openshift.common.is_atomic | bool
-
-- name: Install pyparted (RedHat/Fedora)
- package: name={{ item }} state=present
- with_items:
- - pyparted
- - python-httplib2
- when: not openshift.common.is_containerized | bool
-
-- name: partition the drives
- partitionpool: disks={{ disks }} force={{ force }} sizes={{ sizes }}
-
-- name: create filesystem
- filesystem: fstype=ext4 dev=/dev/{{ item.name }}
- with_items: "{{ partition_pool }}"
-
-- name: mount
- mount: name={{mount_dir}}/{{ item.name }} src=/dev/{{ item.name }} state=mounted fstype=ext4 passno=2
- with_items: "{{ partition_pool }}"
-
-- include: nfs.yml
-
-- name: export physical volumes
- uri:
- url: "{{ kubernetes_url }}/api/{{ kube_api_version }}/persistentvolumes"
- method: POST
- body: "{{ lookup('template', kube_req_template) }}"
- body_format: json
- status_code: 201
- HEADER_Authorization: "Bearer {{ kubernetes_token }}"
- with_items: "{{ partition_pool }}"
diff --git a/roles/kube_nfs_volumes/tasks/nfs.yml b/roles/kube_nfs_volumes/tasks/nfs.yml
deleted file mode 100644
index 9eeff9260..000000000
--- a/roles/kube_nfs_volumes/tasks/nfs.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Install NFS server
- package: name=nfs-utils state=present
- when: not openshift.common.is_containerized | bool
-
-- name: Start rpcbind on Fedora/Red Hat
- systemd:
- name: rpcbind
- state: started
- enabled: yes
-
-- name: Start nfs on Fedora/Red Hat
- systemd:
- name: nfs-server
- state: started
- enabled: yes
-
-- name: Export the directories
- lineinfile: dest=/etc/exports
- regexp="^{{ mount_dir }}/{{ item.name }} "
- line="{{ mount_dir }}/{{ item.name }} {{nfs_export_options}}"
- with_items: "{{ partition_pool }}"
- notify: restart nfs
diff --git a/roles/kube_nfs_volumes/templates/v1/nfs.json.j2 b/roles/kube_nfs_volumes/templates/v1/nfs.json.j2
deleted file mode 120000
index 49c1191bc..000000000
--- a/roles/kube_nfs_volumes/templates/v1/nfs.json.j2
+++ /dev/null
@@ -1 +0,0 @@
-../v1beta3/nfs.json.j2 \ No newline at end of file
diff --git a/roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2 b/roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2
deleted file mode 100644
index b42886ef1..000000000
--- a/roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "kind": "PersistentVolume",
- "apiVersion": "v1beta3",
- "metadata": {
- "name": "pv-{{ inventory_hostname | regex_replace("\.", "-") }}-{{ item.name }}",
- "labels": {
- "type": "nfs"
- }
- },
- "spec": {
- "capacity": {
- "storage": "{{ item.size }}"
- },
- "accessModes": [
- "ReadWriteOnce"
- ],
- "NFS": {
- "Server": "{{ inventory_hostname }}",
- "Path": "{{ mount_dir }}/{{ item.name }}",
- "ReadOnly": false
- }
- }
-}
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 4ecfd2bff..c69f5deda 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -130,6 +130,12 @@ options:
required: false
default: True
aliases: []
+ expire_days:
+ description
+ - Validity of the certificate in days
+ required: false
+ default: None
+ aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
@@ -149,17 +155,15 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/ca_server_cert -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -184,13 +188,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -206,13 +210,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -234,7 +238,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -323,7 +327,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -423,7 +427,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -542,8 +546,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -604,7 +608,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -630,7 +644,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -662,114 +676,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
- if module.params['src']:
+ state = params['state']
+
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
+
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
- if rval[0] and module.params['src']:
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -870,11 +919,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -892,7 +945,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -909,13 +962,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -935,9 +988,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -952,10 +1005,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -968,16 +1021,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1058,9 +1111,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1082,7 +1135,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1456,6 +1509,9 @@ class CAServerCert(OpenShiftCLI):
if proc.returncode == 0:
regex = re.compile(r"^\s*X509v3 Subject Alternative Name:\s*?\n\s*(.*)\s*\n", re.MULTILINE)
match = regex.search(x509output) # E501
+ if not match:
+ return False
+
for entry in re.split(r", *", match.group(1)):
if entry.startswith('DNS') or entry.startswith('IP Address'):
cert_names.append(entry.split(':')[1])
@@ -1480,6 +1536,7 @@ class CAServerCert(OpenShiftCLI):
'signer_cert': {'value': params['signer_cert'], 'include': True},
'signer_key': {'value': params['signer_key'], 'include': True},
'signer_serial': {'value': params['signer_serial'], 'include': True},
+ 'expire_days': {'value': params['expire_days'], 'include': True},
'backup': {'value': params['backup'], 'include': False},
})
@@ -1501,7 +1558,7 @@ class CAServerCert(OpenShiftCLI):
api_rval = server_cert.create()
if api_rval['returncode'] != 0:
- return {'Failed': True, 'msg': api_rval}
+ return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
@@ -1538,6 +1595,7 @@ def main():
signer_key=dict(default='/etc/origin/master/ca.key', type='str'),
signer_serial=dict(default='/etc/origin/master/ca.serial.txt', type='str'),
hostnames=dict(default=[], type='list'),
+ expire_days=dict(default=None, type='int'),
),
supports_check_mode=True,
)
diff --git a/roles/lib_openshift/library/oadm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 8bb0538c0..48e80a7cd 100644
--- a/roles/lib_openshift/library/oadm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -54,7 +54,7 @@ from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
-module: oadm_manage_node
+module: oc_adm_manage_node
short_description: Module to manage openshift nodes
description:
- Manage openshift nodes programmatically.
@@ -126,13 +126,13 @@ extends_documentation_fragment: []
EXAMPLES = '''
- name: oadm manage-node --schedulable=true --selector=ops_node=new
- oadm_manage_node:
+ oc_adm_manage_node:
selector: ops_node=new
schedulable: True
register: schedout
- name: oadm manage-node my-k8s-node-5 --evacuate
- oadm_manage_node:
+ oc_adm_manage_node:
node: my-k8s-node-5
evacuate: True
force: True
@@ -141,17 +141,15 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/manage_node -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -176,13 +174,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -198,13 +196,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -226,7 +224,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -315,7 +313,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -415,7 +413,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -534,8 +532,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -596,7 +594,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -622,7 +630,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -654,114 +662,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
- if module.params['src']:
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
- if rval[0] and module.params['src']:
+ elif params['edits'] is not None:
+ edits = params['edits']
+
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -862,11 +905,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -884,7 +931,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -901,13 +948,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -927,9 +974,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -944,10 +991,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -960,16 +1007,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1050,9 +1097,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1074,7 +1121,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1369,7 +1416,7 @@ class OpenShiftCLIConfig(object):
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
-# -*- -*- -*- Begin included fragment: class/oadm_manage_node.py -*- -*- -*-
+# -*- -*- -*- Begin included fragment: class/oc_adm_manage_node.py -*- -*- -*-
class ManageNodeException(Exception):
@@ -1414,7 +1461,7 @@ class ManageNode(OpenShiftCLI):
if selector:
_sel = selector
- results = self._get('node', rname=_node, selector=_sel)
+ results = self._get('node', name=_node, selector=_sel)
if results['returncode'] != 0:
return results
@@ -1578,9 +1625,9 @@ class ManageNode(OpenShiftCLI):
return {'changed': changed, 'results': results, 'state': "present"}
-# -*- -*- -*- End included fragment: class/oadm_manage_node.py -*- -*- -*-
+# -*- -*- -*- End included fragment: class/oc_adm_manage_node.py -*- -*- -*-
-# -*- -*- -*- Begin included fragment: ansible/oadm_manage_node.py -*- -*- -*-
+# -*- -*- -*- Begin included fragment: ansible/oc_adm_manage_node.py -*- -*- -*-
def main():
@@ -1618,4 +1665,4 @@ def main():
if __name__ == "__main__":
main()
-# -*- -*- -*- End included fragment: ansible/oadm_manage_node.py -*- -*- -*-
+# -*- -*- -*- End included fragment: ansible/oc_adm_manage_node.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index 28df1cf94..35168d1a3 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -127,17 +127,15 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/policy_group -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -162,13 +160,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -184,13 +182,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -212,7 +210,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -301,7 +299,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -401,7 +399,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -520,8 +518,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -582,7 +580,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -608,7 +616,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -640,114 +648,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
- if module.params['src']:
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
- if rval[0] and module.params['src']:
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
+
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -848,11 +891,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -870,7 +917,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -887,13 +934,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -913,9 +960,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -930,10 +977,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -946,16 +993,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1036,9 +1083,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1060,7 +1107,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1908,6 +1955,28 @@ class PolicyGroup(OpenShiftCLI):
self.verbose = verbose
self._rolebinding = None
self._scc = None
+ self._cluster_policy_bindings = None
+ self._policy_bindings = None
+
+ @property
+ def policybindings(self):
+ if self._policy_bindings is None:
+ results = self._get('clusterpolicybindings', None)
+ if results['returncode'] != 0:
+ raise OpenShiftCLIError('Could not retrieve policybindings')
+ self._policy_bindings = results['results'][0]['items'][0]
+
+ return self._policy_bindings
+
+ @property
+ def clusterpolicybindings(self):
+ if self._cluster_policy_bindings is None:
+ results = self._get('clusterpolicybindings', None)
+ if results['returncode'] != 0:
+ raise OpenShiftCLIError('Could not retrieve clusterpolicybindings')
+ self._cluster_policy_bindings = results['results'][0]['items'][0]
+
+ return self._cluster_policy_bindings
@property
def role_binding(self):
@@ -1948,18 +2017,24 @@ class PolicyGroup(OpenShiftCLI):
def exists_role_binding(self):
''' return whether role_binding exists '''
- results = self.get()
- if results['returncode'] == 0:
- self.role_binding = RoleBinding(results['results'][0])
- if self.role_binding.find_group_name(self.config.config_options['group']['value']) != None:
- return True
+ bindings = None
+ if self.config.config_options['resource_kind']['value'] == 'cluster-role':
+ bindings = self.clusterpolicybindings
+ else:
+ bindings = self.policybindings
+ if bindings is None:
return False
- elif self.config.config_options['name']['value'] in results['stderr'] and '" not found' in results['stderr']:
- return False
+ for binding in bindings['roleBindings']:
+ _rb = binding['roleBinding']
+ if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \
+ _rb['groupNames'] is not None and \
+ self.config.config_options['group']['value'] in _rb['groupNames']:
+ self.role_binding = binding
+ return True
- return results
+ return False
def exists_scc(self):
''' return whether scc exists '''
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index 9b26a2159..5f7e4b8fa 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -127,17 +127,15 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/policy_user -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -162,13 +160,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -184,13 +182,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -212,7 +210,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -301,7 +299,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -401,7 +399,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -520,8 +518,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -582,7 +580,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -608,7 +616,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -640,114 +648,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
- if module.params['src']:
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
+
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
- if rval[0] and module.params['src']:
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -848,11 +891,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -870,7 +917,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -887,13 +934,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -913,9 +960,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -930,10 +977,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -946,16 +993,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1036,9 +1083,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1060,7 +1107,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1907,6 +1954,28 @@ class PolicyUser(OpenShiftCLI):
self.verbose = verbose
self._rolebinding = None
self._scc = None
+ self._cluster_policy_bindings = None
+ self._policy_bindings = None
+
+ @property
+ def policybindings(self):
+ if self._policy_bindings is None:
+ results = self._get('clusterpolicybindings', None)
+ if results['returncode'] != 0:
+ raise OpenShiftCLIError('Could not retrieve policybindings')
+ self._policy_bindings = results['results'][0]['items'][0]
+
+ return self._policy_bindings
+
+ @property
+ def clusterpolicybindings(self):
+ if self._cluster_policy_bindings is None:
+ results = self._get('clusterpolicybindings', None)
+ if results['returncode'] != 0:
+ raise OpenShiftCLIError('Could not retrieve clusterpolicybindings')
+ self._cluster_policy_bindings = results['results'][0]['items'][0]
+
+ return self._cluster_policy_bindings
@property
def role_binding(self):
@@ -1929,36 +1998,37 @@ class PolicyUser(OpenShiftCLI):
self._scc = scc
def get(self):
- '''fetch the desired kind'''
+ '''fetch the desired kind
+
+ This is only used for scc objects.
+ The {cluster}rolebindings happen in exists.
+ '''
resource_name = self.config.config_options['name']['value']
if resource_name == 'cluster-reader':
resource_name += 's'
- # oc adm policy add-... creates policy bindings with the name
- # "[resource_name]-binding", however some bindings in the system
- # simply use "[resource_name]". So try both.
-
- results = self._get(self.config.kind, resource_name)
- if results['returncode'] == 0:
- return results
-
- # Now try -binding naming convention
- return self._get(self.config.kind, resource_name + "-binding")
+ return self._get(self.config.kind, resource_name)
def exists_role_binding(self):
''' return whether role_binding exists '''
- results = self.get()
- if results['returncode'] == 0:
- self.role_binding = RoleBinding(results['results'][0])
- if self.role_binding.find_user_name(self.config.config_options['user']['value']) != None:
- return True
+ bindings = None
+ if self.config.config_options['resource_kind']['value'] == 'cluster-role':
+ bindings = self.clusterpolicybindings
+ else:
+ bindings = self.policybindings
+ if bindings is None:
return False
- elif self.config.config_options['name']['value'] in results['stderr'] and '" not found' in results['stderr']:
- return False
+ for binding in bindings['roleBindings']:
+ _rb = binding['roleBinding']
+ if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \
+ _rb['userNames'] is not None and \
+ self.config.config_options['user']['value'] in _rb['userNames']:
+ self.role_binding = binding
+ return True
- return results
+ return False
def exists_scc(self):
''' return whether scc exists '''
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index c398c5551..a6718d921 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -245,17 +245,15 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/registry -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -280,13 +278,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -302,13 +300,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -330,7 +328,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -419,7 +417,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -519,7 +517,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -638,8 +636,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -700,7 +698,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -726,7 +734,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -758,114 +766,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
- if module.params['src']:
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
- if rval[0] and module.params['src']:
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
+
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -966,11 +1009,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -988,7 +1035,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1005,13 +1052,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -1031,9 +1078,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1048,10 +1095,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1064,16 +1111,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1154,9 +1201,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1178,7 +1225,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -2061,7 +2108,7 @@ class Service(Yedit):
# -*- -*- -*- Begin included fragment: lib/volume.py -*- -*- -*-
class Volume(object):
- ''' Class to model an openshift volume object'''
+ ''' Class to represent an openshift volume object'''
volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",
"dc": "spec.template.spec.containers[0].volumeMounts",
"rc": "spec.template.spec.containers[0].volumeMounts",
@@ -2093,6 +2140,11 @@ class Volume(object):
elif volume_type == 'hostpath':
volume['hostPath'] = {}
volume['hostPath']['path'] = volume_info['path']
+ elif volume_type == 'configmap':
+ volume['configMap'] = {}
+ volume['configMap']['name'] = volume_info['configmap_name']
+ volume_mount = {'mountPath': volume_info['path'],
+ 'name': volume_info['name']}
return (volume, volume_mount)
@@ -2235,8 +2287,8 @@ class Registry(OpenShiftCLI):
''' prepared_registry property '''
if not self.__prepared_registry:
results = self.prepare_registry()
- if not results:
- raise RegistryException('Could not perform registry preparation.')
+ if not results or ('returncode' in results and results['returncode'] != 0):
+ raise RegistryException('Could not perform registry preparation. {}'.format(results))
self.__prepared_registry = results
return self.__prepared_registry
@@ -2253,7 +2305,7 @@ class Registry(OpenShiftCLI):
rval = 0
for part in self.registry_parts:
- result = self._get(part['kind'], rname=part['name'])
+ result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
@@ -2301,8 +2353,8 @@ class Registry(OpenShiftCLI):
# probably need to parse this
# pylint thinks results is a string
# pylint: disable=no-member
- if results['returncode'] != 0 and 'items' in results['results']:
- return results
+ if results['returncode'] != 0 and 'items' not in results['results']:
+ raise RegistryException('Could not perform registry preparation. {}'.format(results))
service = None
deploymentconfig = None
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index ab06a5141..0e4b336fb 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -270,17 +270,15 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/router -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -305,13 +303,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -327,13 +325,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -355,7 +353,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -444,7 +442,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -544,7 +542,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -663,8 +661,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -725,7 +723,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -751,7 +759,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -783,114 +791,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
- if module.params['src']:
+ state = params['state']
+
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
- if rval[0] and module.params['src']:
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -991,11 +1034,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -1013,7 +1060,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1030,13 +1077,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -1056,9 +1103,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -1073,10 +1120,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -1089,16 +1136,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1179,9 +1226,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1203,7 +1250,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -2642,7 +2689,7 @@ class Router(OpenShiftCLI):
self.secret = None
self.rolebinding = None
for part in self.router_parts:
- result = self._get(part['kind'], rname=part['name'])
+ result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
new file mode 100644
index 000000000..a34ce351e
--- /dev/null
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -0,0 +1,1807 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/clusterrole -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_clusterrole
+short_description: Modify, and idempotently manage openshift clusterroles
+description:
+ - Manage openshift clusterroles
+options:
+ state:
+ description:
+ - Supported states, present, absent, list
+ - present - will ensure object is created or updated to the value specified
+ - list - will return a clusterrole
+ - absent - will remove a clusterrole
+ required: False
+ default: present
+ choices: ["present", 'absent', 'list']
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ rules:
+ description:
+ - A list of dictionaries that have the rule parameters.
+ - e.g. rules=[{'apiGroups': [""], 'attributeRestrictions': None, 'verbs': ['get'], 'resources': []}]
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: query a list of env vars on dc
+ oc_clusterrole:
+ name: myclusterrole
+ state: list
+
+- name: Set the following variables.
+ oc_clusterrole:
+ name: myclusterrole
+ rules:
+ apiGroups:
+ - ""
+ attributeRestrictions: null
+ verbs: []
+ resources: []
+'''
+
+# -*- -*- -*- End included fragment: doc/clusterrole -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+
+class YeditException(Exception): # pragma: no cover
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object): # pragma: no cover
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is not None:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ elif isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.safe_load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
+
+ return inc_value
+
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
+
+ if params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and state != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': state}
+
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
+ else:
+ rval = yamlfile.delete(params['key'])
+
+ if rval[0] and params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
+
+ elif state == 'present':
+ # check if content is different than what is in the file
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+
+ yamlfile.yaml_dict = content
+
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
+
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
+
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
+
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
+ yamlfile.write()
+
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
+
+ # no edits to make
+ if params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': state}
+
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+ return {'failed': True, 'msg': 'Unkown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, name=None, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, name=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are retuned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node scheduable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ cmd.append('--schedulable={}'.format(schedulable))
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ if grace_period:
+ cmd.append('--grace-period={}'.format(int(grace_period)))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout.decode(), stderr.decode()
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = [self.oc_binary]
+
+ if oadm:
+ cmds.append('adm')
+
+ cmds.extend(cmd)
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
+ cmds.extend(['-n', self.namespace])
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+ rval = {"returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object): # pragma: no cover
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+ Utils._write(tmpfile, open(inc_file).read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get openshift version in Openshift 3.2
+ # By default "oc version in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(user_def[key])
+ print(value)
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(user_values)
+ print(api_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+
+ return rval
+
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/rule.py -*- -*- -*-
+
+
+class Rule(object):
+ '''class to represent a clusterrole rule
+
+ Example Rule Object's yaml:
+ - apiGroups:
+ - ""
+ attributeRestrictions: null
+ resources:
+ - persistentvolumes
+ verbs:
+ - create
+ - delete
+ - deletecollection
+ - get
+ - list
+ - patch
+ - update
+ - watch
+
+ '''
+ def __init__(self,
+ api_groups=None,
+ attr_restrictions=None,
+ resources=None,
+ verbs=None):
+ self.__api_groups = api_groups if api_groups is not None else [""]
+ self.__verbs = verbs if verbs is not None else []
+ self.__resources = resources if resources is not None else []
+ self.__attribute_restrictions = attr_restrictions if attr_restrictions is not None else None
+
+ @property
+ def verbs(self):
+ '''property for verbs'''
+ if self.__verbs is None:
+ return []
+
+ return self.__verbs
+
+ @verbs.setter
+ def verbs(self, data):
+ '''setter for verbs'''
+ self.__verbs = data
+
+ @property
+ def api_groups(self):
+ '''property for api_groups'''
+ if self.__api_groups is None:
+ return []
+ return self.__api_groups
+
+ @api_groups.setter
+ def api_groups(self, data):
+ '''setter for api_groups'''
+ self.__api_groups = data
+
+ @property
+ def resources(self):
+ '''property for resources'''
+ if self.__resources is None:
+ return []
+
+ return self.__resources
+
+ @resources.setter
+ def resources(self, data):
+ '''setter for resources'''
+ self.__resources = data
+
+ @property
+ def attribute_restrictions(self):
+ '''property for attribute_restrictions'''
+ return self.__attribute_restrictions
+
+ @attribute_restrictions.setter
+ def attribute_restrictions(self, data):
+ '''setter for attribute_restrictions'''
+ self.__attribute_restrictions = data
+
+ def add_verb(self, inc_verb):
+ '''add a verb to the verbs array'''
+ self.verbs.append(inc_verb)
+
+ def add_api_group(self, inc_apigroup):
+ '''add an api_group to the api_groups array'''
+ self.api_groups.append(inc_apigroup)
+
+ def add_resource(self, inc_resource):
+ '''add an resource to the resources array'''
+ self.resources.append(inc_resource)
+
+ def remove_verb(self, inc_verb):
+ '''add a verb to the verbs array'''
+ try:
+ self.verbs.remove(inc_verb)
+ return True
+ except ValueError:
+ pass
+
+ return False
+
+ def remove_api_group(self, inc_api_group):
+ '''add a verb to the verbs array'''
+ try:
+ self.api_groups.remove(inc_api_group)
+ return True
+ except ValueError:
+ pass
+
+ return False
+
+ def remove_resource(self, inc_resource):
+ '''add a verb to the verbs array'''
+ try:
+ self.resources.remove(inc_resource)
+ return True
+ except ValueError:
+ pass
+
+ return False
+
+ def __eq__(self, other):
+ '''return whether rules are equal'''
+ return (self.attribute_restrictions == other.attribute_restrictions and
+ self.api_groups == other.api_groups and
+ self.resources == other.resources and
+ self.verbs == other.verbs)
+
+
+ @staticmethod
+ def parse_rules(inc_rules):
+ '''create rules from an array'''
+
+ results = []
+ for rule in inc_rules:
+ results.append(Rule(rule['apiGroups'],
+ rule['attributeRestrictions'],
+ rule['resources'],
+ rule['verbs']))
+
+ return results
+
+# -*- -*- -*- End included fragment: lib/rule.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/clusterrole.py -*- -*- -*-
+
+
+# pylint: disable=too-many-public-methods
+class ClusterRole(Yedit):
+ ''' Class to model an openshift ClusterRole'''
+ rules_path = "rules"
+
+ def __init__(self, name=None, content=None):
+ ''' Constructor for clusterrole '''
+ if content is None:
+ content = ClusterRole.builder(name).yaml_dict
+
+ super(ClusterRole, self).__init__(content=content)
+
+ self.__rules = Rule.parse_rules(self.get(ClusterRole.rules_path)) or []
+
+ @property
+ def rules(self):
+ return self.__rules
+
+ @rules.setter
+ def rules(self, data):
+ self.__rules = data
+ self.put(ClusterRole.rules_path, self.__rules)
+
+ def rule_exists(self, inc_rule):
+ '''attempt to find the inc_rule in the rules list'''
+ for rule in self.rules:
+ if rule == inc_rule:
+ return True
+
+ return False
+
+ def compare(self, other, verbose=False):
+ '''compare function for clusterrole'''
+ for rule in other.rules:
+ if rule not in self.rules:
+ if verbose:
+ print('Rule in other not found in self. [{}]'.format(rule))
+ return False
+
+ for rule in self.rules:
+ if rule not in other.rules:
+ if verbose:
+ print('Rule in self not found in other. [{}]'.format(rule))
+ return False
+
+ return True
+
+ @staticmethod
+ def builder(name='default_clusterrole', rules=None):
+ '''return a clusterrole with name and/or rules'''
+ if rules is None:
+ rules = [{'apiGroups': [""],
+ 'attributeRestrictions': None,
+ 'verbs': [],
+ 'resources': []}]
+ content = {
+ 'apiVersion': 'v1',
+ 'kind': 'ClusterRole',
+ 'metadata': {'name': '{}'.format(name)},
+ 'rules': rules,
+ }
+
+ return ClusterRole(content=content)
+
+
+# -*- -*- -*- End included fragment: lib/clusterrole.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_clusterrole.py -*- -*- -*-
+
+
+# pylint: disable=too-many-instance-attributes
+class OCClusterRole(OpenShiftCLI):
+ ''' Class to manage clusterrole objects'''
+ kind = 'clusterrole'
+
+ def __init__(self,
+ name,
+ rules=None,
+ kubeconfig=None,
+ verbose=False):
+ ''' Constructor for OCClusterRole '''
+ super(OCClusterRole, self).__init__(None, kubeconfig=kubeconfig, verbose=verbose)
+ self.verbose = verbose
+ self.name = name
+ self._clusterrole = None
+ self._inc_clusterrole = ClusterRole.builder(name, rules)
+
+ @property
+ def clusterrole(self):
+ ''' property for clusterrole'''
+ if not self._clusterrole:
+ self.get()
+ return self._clusterrole
+
+ @clusterrole.setter
+ def clusterrole(self, data):
+ ''' setter function for clusterrole property'''
+ self._clusterrole = data
+
+ @property
+ def inc_clusterrole(self):
+ ''' property for inc_clusterrole'''
+ return self._inc_clusterrole
+
+ @inc_clusterrole.setter
+ def inc_clusterrole(self, data):
+ ''' setter function for inc_clusterrole property'''
+ self._inc_clusterrole = data
+
+ def exists(self):
+ ''' return whether a clusterrole exists '''
+ if self.clusterrole:
+ return True
+
+ return False
+
+ def get(self):
+ '''return a clusterrole '''
+ result = self._get(self.kind, self.name)
+
+ if result['returncode'] == 0:
+ self.clusterrole = ClusterRole(content=result['results'][0])
+ result['results'] = self.clusterrole.yaml_dict
+
+ elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']:
+ result['returncode'] = 0
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.name)
+
+ def create(self):
+ '''create a clusterrole from the proposed incoming clusterrole'''
+ return self._create_from_content(self.name, self.inc_clusterrole.yaml_dict)
+
+ def update(self):
+ '''update a project'''
+ return self._replace_content(self.kind, self.name, self.inc_clusterrole.yaml_dict)
+
+ def needs_update(self):
+ ''' verify an update is needed'''
+ return not self.clusterrole.compare(self.inc_clusterrole, self.verbose)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the idempotent ansible code'''
+
+ oc_clusterrole = OCClusterRole(params['name'],
+ params['rules'],
+ params['kubeconfig'],
+ params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_clusterrole.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_clusterrole.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+ api_rval = oc_clusterrole.delete()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'state': state}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_clusterrole.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+ # Create it here
+ api_rval = oc_clusterrole.create()
+
+ # return the created object
+ api_rval = oc_clusterrole.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if oc_clusterrole.needs_update():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
+
+ api_rval = oc_clusterrole.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_clusterrole.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ return {'failed': True,
+ 'changed': False,
+ 'msg': 'Unknown state passed. [%s]' % state}
+
+# -*- -*- -*- End included fragment: class/oc_clusterrole.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_clusterrole.py -*- -*- -*-
+
+def main():
+ '''
+ ansible oc module for clusterrole
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ rules=dict(default=None, type='list'),
+ ),
+ supports_check_mode=True,
+ )
+
+ results = OCClusterRole.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_clusterrole.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
new file mode 100644
index 000000000..69dd23a0e
--- /dev/null
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -0,0 +1,1628 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/configmap -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_configmap
+short_description: Modify, and idempotently manage openshift configmaps
+description:
+ - Modify openshift configmaps programmatically.
+options:
+ state:
+ description:
+ - Supported states, present, absent, list
+ - present - will ensure object is created or updated to the value specified
+ - list - will return a configmap
+ - absent - will remove the configmap
+ required: False
+ default: present
+ choices: ["present", 'absent', 'list']
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: True
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+ default: default
+ aliases: []
+ from_file:
+ description:
+ - A dict of key, value pairs representing the configmap key and the value represents the file path.
+ required: false
+ default: None
+ aliases: []
+ from_literal:
+ description:
+ - A dict of key, value pairs representing the configmap key and the value represents the string content
+ required: false
+ default: None
+ aliases: []
+author:
+- "kenny woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: create group
+ oc_configmap:
+ state: present
+ name: testmap
+ from_file:
+ secret: /path/to/secret
+ from_literal:
+ title: systemadmin
+ register: configout
+'''
+
+# -*- -*- -*- End included fragment: doc/configmap -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+
+class YeditException(Exception): # pragma: no cover
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object): # pragma: no cover
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is not None:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ elif isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.safe_load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
+
+ return inc_value
+
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
+
+ if params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and state != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': state}
+
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
+ else:
+ rval = yamlfile.delete(params['key'])
+
+ if rval[0] and params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
+
+ elif state == 'present':
+ # check if content is different than what is in the file
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+
+ yamlfile.yaml_dict = content
+
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
+
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
+
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
+
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
+ yamlfile.write()
+
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
+
+ # no edits to make
+ if params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': state}
+
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+ return {'failed': True, 'msg': 'Unkown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, name=None, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, name=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are retuned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node scheduable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ cmd.append('--schedulable={}'.format(schedulable))
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ if grace_period:
+ cmd.append('--grace-period={}'.format(int(grace_period)))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout.decode(), stderr.decode()
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = [self.oc_binary]
+
+ if oadm:
+ cmds.append('adm')
+
+ cmds.extend(cmd)
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
+ cmds.extend(['-n', self.namespace])
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+ rval = {"returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object): # pragma: no cover
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+ Utils._write(tmpfile, open(inc_file).read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get openshift version in Openshift 3.2
+ # By default "oc version in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(user_def[key])
+ print(value)
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(user_values)
+ print(api_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+
+ return rval
+
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_configmap.py -*- -*- -*-
+
+
+# pylint: disable=too-many-arguments
+class OCConfigMap(OpenShiftCLI):
+ ''' Openshift ConfigMap Class
+
+ ConfigMaps are a way to store data inside of objects
+ '''
+ def __init__(self,
+ name,
+ from_file,
+ from_literal,
+ state,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False):
+ ''' Constructor for OpenshiftOC '''
+ super(OCConfigMap, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
+ self.name = name
+ self.state = state
+ self._configmap = None
+ self._inc_configmap = None
+ self.from_file = from_file if from_file is not None else {}
+ self.from_literal = from_literal if from_literal is not None else {}
+
+ @property
+ def configmap(self):
+ if self._configmap is None:
+ self._configmap = self.get()
+
+ return self._configmap
+
+ @configmap.setter
+ def configmap(self, inc_map):
+ self._configmap = inc_map
+
+ @property
+ def inc_configmap(self):
+ if self._inc_configmap is None:
+ results = self.create(dryrun=True, output=True)
+ self._inc_configmap = results['results']
+
+ return self._inc_configmap
+
+ @inc_configmap.setter
+ def inc_configmap(self, inc_map):
+ self._inc_configmap = inc_map
+
+ def from_file_to_params(self):
+ '''return from_files in a string ready for cli'''
+ return ["--from-file={}={}".format(key, value) for key, value in self.from_file.items()]
+
+ def from_literal_to_params(self):
+ '''return from_literal in a string ready for cli'''
+ return ["--from-literal={}={}".format(key, value) for key, value in self.from_literal.items()]
+
+ def get(self):
+ '''return a configmap by name '''
+ results = self._get('configmap', self.name)
+ if results['returncode'] == 0 and results['results'][0]:
+ self.configmap = results['results'][0]
+
+ if results['returncode'] != 0 and '"{}" not found'.format(self.name) in results['stderr']:
+ results['returncode'] = 0
+
+ return results
+
+ def delete(self):
+ '''delete a configmap by name'''
+ return self._delete('configmap', self.name)
+
+ def create(self, dryrun=False, output=False):
+ '''Create a configmap
+
+ :dryrun: Product what you would have done. default: False
+ :output: Whether to parse output. default: False
+ '''
+
+ cmd = ['create', 'configmap', self.name]
+ if self.from_literal is not None:
+ cmd.extend(self.from_literal_to_params())
+
+ if self.from_file is not None:
+ cmd.extend(self.from_file_to_params())
+
+ if dryrun:
+ cmd.extend(['--dry-run', '-ojson'])
+
+ results = self.openshift_cmd(cmd, output=output)
+
+ return results
+
+ def update(self):
+ '''run update configmap '''
+ return self._replace_content('configmap', self.name, self.inc_configmap)
+
+ def needs_update(self):
+ '''compare the current configmap with the proposed and return if they are equal'''
+ return not Utils.check_def_equal(self.inc_configmap, self.configmap, debug=self.verbose)
+
+ @staticmethod
+ # pylint: disable=too-many-return-statements,too-many-branches
+ # TODO: This function should be refactored into its individual parts.
+ def run_ansible(params, check_mode):
+ '''run the ansible idempotent code'''
+
+ oc_cm = OCConfigMap(params['name'],
+ params['from_file'],
+ params['from_literal'],
+ params['state'],
+ params['namespace'],
+ kubeconfig=params['kubeconfig'],
+ verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_cm.get()
+
+ if 'failed' in api_rval:
+ return {'failed': True, 'msg': api_rval}
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ if not params['name']:
+ return {'failed': True,
+ 'msg': 'Please specify a name when state is absent|present.'}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not Utils.exists(api_rval['results'], params['name']):
+ return {'changed': False, 'state': 'absent'}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+ api_rval = oc_cm.delete()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Create
+ ########
+ if state == 'present':
+ if not Utils.exists(api_rval['results'], params['name']):
+
+ if check_mode:
+ return {'changed': True, 'msg': 'Would have performed a create.'}
+
+ api_rval = oc_cm.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ api_rval = oc_cm.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if oc_cm.needs_update():
+
+ api_rval = oc_cm.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ api_rval = oc_cm.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
+
+# -*- -*- -*- End included fragment: class/oc_configmap.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_configmap.py -*- -*- -*-
+
+
+def main():
+ '''
+ ansible oc module for managing OpenShift configmap objects
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ name=dict(default=None, required=True, type='str'),
+ from_file=dict(default=None, type='dict'),
+ from_literal=dict(default=None, type='dict'),
+ ),
+ supports_check_mode=True,
+ )
+
+
+ rval = OCConfigMap.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_configmap.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index 7a7eaf40a..70329ccfe 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -169,17 +169,15 @@ oc_edit:
# -*- -*- -*- End included fragment: doc/edit -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -204,13 +202,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -226,13 +224,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -254,7 +252,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -343,7 +341,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -443,7 +441,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -562,8 +560,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -624,7 +622,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -650,7 +658,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -682,114 +690,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
- if module.params['src']:
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
- if rval[0] and module.params['src']:
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -890,11 +933,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -912,7 +959,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -929,13 +976,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -955,9 +1002,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -972,10 +1019,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -988,16 +1035,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1078,9 +1125,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1102,7 +1149,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index a1994b0f1..bda5eebc5 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -136,17 +136,15 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/env -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -171,13 +169,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -193,13 +191,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -221,7 +219,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -310,7 +308,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -410,7 +408,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -529,8 +527,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -591,7 +589,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -617,7 +625,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -649,114 +657,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
- if module.params['src']:
+ state = params['state']
+
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
- if rval[0] and module.params['src']:
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -857,11 +900,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -879,7 +926,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -896,13 +943,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -922,9 +969,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -939,10 +986,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -955,16 +1002,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1045,9 +1092,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1069,7 +1116,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
new file mode 100644
index 000000000..462e14868
--- /dev/null
+++ b/roles/lib_openshift/library/oc_group.py
@@ -0,0 +1,1607 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/group -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_group
+short_description: Modify, and idempotently manage openshift groups.
+description:
+ - Modify openshift groups programmatically.
+options:
+ state:
+ description:
+ - Supported states, present, absent, list
+ - present - will ensure object is created or updated to the value specified
+ - list - will return a group
+ - absent - will remove the group
+ required: False
+ default: present
+ choices: ["present", 'absent', 'list']
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+ default: str
+ aliases: []
+author:
+- "Joel Diaz <jdiaz@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: create group
+ oc_group:
+ state: present
+ name: acme_org
+ register: group_out
+'''
+
+# -*- -*- -*- End included fragment: doc/group -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+
+class YeditException(Exception): # pragma: no cover
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object): # pragma: no cover
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is not None:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ elif isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.safe_load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
+
+ return inc_value
+
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
+
+ if params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and state != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': state}
+
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
+ else:
+ rval = yamlfile.delete(params['key'])
+
+ if rval[0] and params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
+
+ elif state == 'present':
+ # check if content is different than what is in the file
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+
+ yamlfile.yaml_dict = content
+
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
+
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
+
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
+
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
+ yamlfile.write()
+
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
+
+ # no edits to make
+ if params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': state}
+
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+ return {'failed': True, 'msg': 'Unkown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, name=None, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, name=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are retuned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node scheduable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ cmd.append('--schedulable={}'.format(schedulable))
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ if grace_period:
+ cmd.append('--grace-period={}'.format(int(grace_period)))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout.decode(), stderr.decode()
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = [self.oc_binary]
+
+ if oadm:
+ cmds.append('adm')
+
+ cmds.extend(cmd)
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
+ cmds.extend(['-n', self.namespace])
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+ rval = {"returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object): # pragma: no cover
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+ Utils._write(tmpfile, open(inc_file).read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get openshift version in Openshift 3.2
+ # By default "oc version in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(user_def[key])
+ print(value)
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(user_values)
+ print(api_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+
+ return rval
+
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/group.py -*- -*- -*-
+
+
+class GroupConfig(object):
+ ''' Handle route options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ sname,
+ namespace,
+ kubeconfig):
+ ''' constructor for handling group options '''
+ self.kubeconfig = kubeconfig
+ self.name = sname
+ self.namespace = namespace
+ self.data = {}
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' return a service as a dict '''
+ self.data['apiVersion'] = 'v1'
+ self.data['kind'] = 'Group'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ self.data['users'] = None
+
+
+# pylint: disable=too-many-instance-attributes
+class Group(Yedit):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'group'
+
+ def __init__(self, content):
+ '''Group constructor'''
+ super(Group, self).__init__(content=content)
+
+# -*- -*- -*- End included fragment: lib/group.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_group.py -*- -*- -*-
+
+
+class OCGroup(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'group'
+
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for OCGroup '''
+ super(OCGroup, self).__init__(config.namespace, config.kubeconfig)
+ self.config = config
+ self.namespace = config.namespace
+ self._group = None
+
+ @property
+ def group(self):
+ ''' property function service'''
+ if not self._group:
+ self.get()
+ return self._group
+
+ @group.setter
+ def group(self, data):
+ ''' setter function for yedit var '''
+ self._group = data
+
+ def exists(self):
+ ''' return whether a group exists '''
+ if self.group:
+ return True
+
+ return False
+
+ def get(self):
+ '''return group information '''
+ result = self._get(self.kind, self.config.name)
+ if result['returncode'] == 0:
+ self.group = Group(content=result['results'][0])
+ elif 'groups \"{}\" not found'.format(self.config.name) in result['stderr']:
+ result['returncode'] = 0
+ result['results'] = [{}]
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create the object'''
+ return self._create_from_content(self.config.name, self.config.data)
+
+ def update(self):
+ '''update the object'''
+ return self._replace_content(self.kind, self.config.name, self.config.data)
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=[], debug=True)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, check_mode=False):
+ '''run the idempotent ansible code'''
+
+ gconfig = GroupConfig(params['name'],
+ params['namespace'],
+ params['kubeconfig'],
+ )
+ oc_group = OCGroup(gconfig, verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_group.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval['results'], 'state': state}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_group.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+ api_rval = oc_group.delete()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'state': state}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_group.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+ # Create it here
+ api_rval = oc_group.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_group.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if oc_group.needs_update():
+ api_rval = oc_group.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_group.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
+
+# -*- -*- -*- End included fragment: class/oc_group.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_group.py -*- -*- -*-
+
+#pylint: disable=too-many-branches
+def main():
+ '''
+ ansible oc module for group
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ namespace=dict(default='default', type='str'),
+ # addind users to a group is handled through the oc_users module
+ #users=dict(default=None, type='list'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCGroup.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in rval:
+ return module.fail_json(**rval)
+
+ return module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_group.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
new file mode 100644
index 000000000..8aed060bb
--- /dev/null
+++ b/roles/lib_openshift/library/oc_image.py
@@ -0,0 +1,1533 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/image -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_image
+short_description: Create, modify, and idempotently manage openshift labels.
+description:
+ - Modify openshift labels programmatically.
+options:
+ state:
+ description:
+ - State controls the action that will be taken with resource
+ - 'present' will create. Does _not_ support update.
+ - 'list' will read the labels
+ default: present
+ choices: ["present", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ namespace:
+ description:
+ - The namespace where this object lives
+ required: false
+ default: default
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ registry_url:
+ description:
+ - The url for the registry so that openshift can pull the image
+ required: false
+ default: None
+ aliases: []
+ image_name:
+ description:
+ - The name of the image being imported
+ required: false
+ default: False
+ aliases: []
+ image_tag:
+ description:
+ - The tag of the image being imported
+ required: false
+ default: None
+ aliases: []
+author:
+- "Ivan Horvath<ihorvath@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: Get an imagestream
+ oc_image:
+ name: php55
+ state: list
+ register: imageout
+
+- name: create an imagestream
+ oc_image:
+ state: present
+ image_name: php55
+ image_tag: int
+ registry_url: registry.example.com
+ namespace: default
+ register: imageout
+'''
+
+# -*- -*- -*- End included fragment: doc/image -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+
+class YeditException(Exception): # pragma: no cover
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object): # pragma: no cover
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is not None:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ elif isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.safe_load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
+
+ return inc_value
+
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
+
+ if params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and state != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': state}
+
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
+ else:
+ rval = yamlfile.delete(params['key'])
+
+ if rval[0] and params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
+
+ elif state == 'present':
+ # check if content is different than what is in the file
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+
+ yamlfile.yaml_dict = content
+
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
+
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
+
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
+
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
+ yamlfile.write()
+
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
+
+ # no edits to make
+ if params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': state}
+
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+ return {'failed': True, 'msg': 'Unkown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, name=None, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, name=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are retuned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node scheduable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ cmd.append('--schedulable={}'.format(schedulable))
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ if grace_period:
+ cmd.append('--grace-period={}'.format(int(grace_period)))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout.decode(), stderr.decode()
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = [self.oc_binary]
+
+ if oadm:
+ cmds.append('adm')
+
+ cmds.extend(cmd)
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
+ cmds.extend(['-n', self.namespace])
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+ rval = {"returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object): # pragma: no cover
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+ Utils._write(tmpfile, open(inc_file).read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get openshift version in Openshift 3.2
+ # By default "oc version in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(user_def[key])
+ print(value)
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(user_values)
+ print(api_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+
+ return rval
+
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_image.py -*- -*- -*-
+
+
+# pylint: disable=too-many-arguments
+class OCImage(OpenShiftCLI):
+ ''' Class to import and create an imagestream object'''
+ def __init__(self,
+ namespace,
+ registry_url,
+ image_name,
+ image_tag,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False):
+ ''' Constructor for OCImage'''
+ super(OCImage, self).__init__(namespace, kubeconfig)
+ self.registry_url = registry_url
+ self.image_name = image_name
+ self.image_tag = image_tag
+ self.verbose = verbose
+
+ def get(self):
+ '''return a image by name '''
+ results = self._get('imagestream', self.image_name)
+ results['exists'] = False
+ if results['returncode'] == 0 and results['results'][0]:
+ results['exists'] = True
+
+ if results['returncode'] != 0 and '"{}" not found'.format(self.image_name) in results['stderr']:
+ results['returncode'] = 0
+
+ return results
+
+ def create(self, url=None, name=None, tag=None):
+ '''Create an image '''
+ return self._import_image(url, name, tag)
+
+
+ # pylint: disable=too-many-return-statements
+ @staticmethod
+ def run_ansible(params, check_mode):
+ ''' run the ansible idempotent code '''
+
+ ocimage = OCImage(params['namespace'],
+ params['registry_url'],
+ params['image_name'],
+ params['image_tag'],
+ kubeconfig=params['kubeconfig'],
+ verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = ocimage.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ if api_rval['returncode'] != 0:
+ return {"failed": True, "msg": api_rval}
+ return {"changed": False, "results": api_rval, "state": "list"}
+
+ ########
+ # Create
+ ########
+ if state == 'present':
+
+ if not Utils.exists(api_rval['results'], params['image_name']):
+
+ if check_mode:
+ return {"changed": False, "msg": 'CHECK_MODE: Would have performed a create'}
+
+ api_rval = ocimage.create(params['registry_url'],
+ params['image_name'],
+ params['image_tag'])
+
+ if api_rval['returncode'] != 0:
+ return {"failed": True, "msg": api_rval}
+
+ # return the newly created object
+ api_rval = ocimage.get()
+
+ if api_rval['returncode'] != 0:
+ return {"failed": True, "msg": api_rval}
+
+ return {"changed": True, "results": api_rval, "state": "present"}
+
+ # image exists, no change
+ return {"changed": False, "results": api_rval, "state": "present"}
+
+ return {"failed": True, "changed": False, "msg": "Unknown state passed. {0}".format(state)}
+
+# -*- -*- -*- End included fragment: class/oc_image.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_image.py -*- -*- -*-
+
+
+def main():
+ '''
+ ansible oc module for image import
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'list']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ registry_url=dict(default=None, type='str'),
+ image_name=dict(default=None, required=True, type='str'),
+ image_tag=dict(default=None, type='str'),
+ force=dict(default=False, type='bool'),
+ ),
+
+ supports_check_mode=True,
+ )
+
+ rval = OCImage.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_image.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index 109a78184..0d18a7afe 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -145,17 +145,15 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/label -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -180,13 +178,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -202,13 +200,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -230,7 +228,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -319,7 +317,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -419,7 +417,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -538,8 +536,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -600,7 +598,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -626,7 +634,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -658,114 +666,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
- if module.params['src']:
+ state = params['state']
+
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
- if rval[0] and module.params['src']:
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -866,11 +909,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -888,7 +935,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -905,13 +952,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -931,9 +978,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -948,10 +995,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -964,16 +1011,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1054,9 +1101,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1078,7 +1125,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1508,9 +1555,9 @@ class OCLabel(OpenShiftCLI):
label_list = []
if self.name:
- result = self._get(resource=self.kind, rname=self.name, selector=self.selector)
+ result = self._get(resource=self.kind, name=self.name, selector=self.selector)
- if 'labels' in result['results'][0]['metadata']:
+ if result['results'][0] and 'labels' in result['results'][0]['metadata']:
label_list.append(result['results'][0]['metadata']['labels'])
else:
label_list.append({})
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index bd6e77c2a..0b01670c6 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -148,17 +148,15 @@ register: router_output
# -*- -*- -*- End included fragment: doc/obj -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -183,13 +181,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -205,13 +203,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -233,7 +231,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -322,7 +320,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -422,7 +420,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -541,8 +539,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -603,7 +601,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -629,7 +637,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -661,114 +669,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
- if module.params['src']:
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
- if rval[0] and module.params['src']:
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -869,11 +912,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -891,7 +938,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -908,13 +955,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -934,9 +981,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -951,10 +998,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -967,16 +1014,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1057,9 +1104,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1081,7 +1128,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1387,7 +1434,7 @@ class OCObject(OpenShiftCLI):
def __init__(self,
kind,
namespace,
- rname=None,
+ name=None,
selector=None,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
@@ -1396,21 +1443,21 @@ class OCObject(OpenShiftCLI):
super(OCObject, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose,
all_namespaces=all_namespaces)
self.kind = kind
- self.name = rname
+ self.name = name
self.selector = selector
def get(self):
'''return a kind by name '''
- results = self._get(self.kind, rname=self.name, selector=self.selector)
- if results['returncode'] != 0 and 'stderr' in results and \
- '\"%s\" not found' % self.name in results['stderr']:
+ results = self._get(self.kind, name=self.name, selector=self.selector)
+ if (results['returncode'] != 0 and 'stderr' in results and
+ '\"{}\" not found'.format(self.name) in results['stderr']):
results['returncode'] = 0
return results
def delete(self):
- '''return all pods '''
- return self._delete(self.kind, self.name)
+ '''delete the object'''
+ return self._delete(self.kind, name=self.name, selector=self.selector)
def create(self, files=None, content=None):
'''
@@ -1486,24 +1533,33 @@ class OCObject(OpenShiftCLI):
# Get
#####
if state == 'list':
- return {'changed': False, 'results': api_rval, 'state': 'list'}
-
- if not params['name']:
- return {'failed': True, 'msg': 'Please specify a name when state is absent|present.'} # noqa: E501
+ return {'changed': False, 'results': api_rval, 'state': state}
########
# Delete
########
if state == 'absent':
- if not Utils.exists(api_rval['results'], params['name']):
- return {'changed': False, 'state': 'absent'}
+ # if we were passed a name, verify its not in our results
+ if params['name'] is not None and not Utils.exists(api_rval['results'], params['name']):
+ return {'changed': False, 'state': state}
+
+ # verify results are empty for the selector
+ if params['selector'] is not None and len(api_rval['results']) == 0:
+ return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete'}
api_rval = ocobj.delete()
- return {'changed': True, 'results': api_rval, 'state': 'absent'}
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ # create/update: Must define a name beyond this point
+ if not params['name']:
+ return {'failed': True, 'msg': 'Please specify a name when state is present.'}
if state == 'present':
########
@@ -1529,7 +1585,7 @@ class OCObject(OpenShiftCLI):
if params['files'] and params['delete_after']:
Utils.cleanup(params['files'])
- return {'changed': True, 'results': api_rval, 'state': "present"}
+ return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
@@ -1544,7 +1600,7 @@ class OCObject(OpenShiftCLI):
if params['files'] and params['delete_after']:
Utils.cleanup(params['files'])
- return {'changed': False, 'results': api_rval['results'][0], 'state': "present"}
+ return {'changed': False, 'results': api_rval['results'][0], 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
@@ -1563,7 +1619,7 @@ class OCObject(OpenShiftCLI):
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
- return {'changed': True, 'results': api_rval, 'state': "present"}
+ return {'changed': True, 'results': api_rval, 'state': state}
# -*- -*- -*- End included fragment: class/oc_obj.py -*- -*- -*-
@@ -1591,7 +1647,7 @@ def main():
force=dict(default=False, type='bool'),
selector=dict(default=None, type='str'),
),
- mutually_exclusive=[["content", "files"]],
+ mutually_exclusive=[["content", "files"], ["selector", "name"]],
supports_check_mode=True,
)
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index 1d0e4c876..9b321b47c 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -80,17 +80,15 @@ oc_objectvalidator:
# -*- -*- -*- End included fragment: doc/objectvalidator -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -115,13 +113,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -137,13 +135,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -165,7 +163,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -254,7 +252,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -354,7 +352,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -473,8 +471,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -535,7 +533,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -561,7 +569,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -593,114 +601,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
- if module.params['src']:
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
- if rval[0] and module.params['src']:
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -801,11 +844,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -823,7 +870,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -840,13 +887,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -866,9 +913,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -883,10 +930,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -899,16 +946,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -989,9 +1036,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1013,7 +1060,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 14d519e52..34f80ce13 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -137,17 +137,15 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/process -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -172,13 +170,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -194,13 +192,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -222,7 +220,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -311,7 +309,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -411,7 +409,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -530,8 +528,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -592,7 +590,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -618,7 +626,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -650,114 +658,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
- if module.params['src']:
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
- if rval[0] and module.params['src']:
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -858,11 +901,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -880,7 +927,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -897,13 +944,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -923,9 +970,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -940,10 +987,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -956,16 +1003,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1046,9 +1093,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1070,7 +1117,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1502,7 +1549,7 @@ class OCProcess(OpenShiftCLI):
if api_rval['returncode'] != 0:
return {"failed": True, "msg" : api_rval}
- return {"changed" : False, "results": api_rval, "state": "list"}
+ return {"changed" : False, "results": api_rval, "state": state}
elif state == 'present':
if check_mode and params['create']:
@@ -1524,9 +1571,9 @@ class OCProcess(OpenShiftCLI):
return {"failed": True, "msg": api_rval}
if params['create']:
- return {"changed": True, "results": api_rval, "state": "present"}
+ return {"changed": True, "results": api_rval, "state": state}
- return {"changed": False, "results": api_rval, "state": "present"}
+ return {"changed": False, "results": api_rval, "state": state}
# verify results
update = False
@@ -1541,13 +1588,13 @@ class OCProcess(OpenShiftCLI):
update = True
if not update:
- return {"changed": update, "results": api_rval, "state": "present"}
+ return {"changed": update, "results": api_rval, "state": state}
for cmd in rval:
if cmd['returncode'] != 0:
- return {"failed": True, "changed": update, "results": rval, "state": "present"}
+ return {"failed": True, "changed": update, "msg": rval, "state": state}
- return {"changed": update, "results": rval, "state": "present"}
+ return {"changed": update, "results": rval, "state": state}
# -*- -*- -*- End included fragment: class/oc_process.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index 4f82abcfe..331f31e41 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -134,17 +134,15 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/project -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -169,13 +167,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -191,13 +189,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -219,7 +217,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -308,7 +306,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -408,7 +406,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -527,8 +525,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -589,7 +587,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -615,7 +623,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -647,114 +655,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
- if module.params['src']:
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
- if rval[0] and module.params['src']:
+ elif params['edits'] is not None:
+ edits = params['edits']
+
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -855,11 +898,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -877,7 +924,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -894,13 +941,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -920,9 +967,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -937,10 +984,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -953,16 +1000,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1043,9 +1090,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1067,7 +1114,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
@@ -1511,30 +1558,34 @@ class OCProject(OpenShiftCLI):
def update(self):
'''update a project '''
- self.project.update_annotation('display-name', self.config.config_options['display_name']['value'])
- self.project.update_annotation('description', self.config.config_options['description']['value'])
+ if self.config.config_options['display_name']['value'] is not None:
+ self.project.update_annotation('display-name', self.config.config_options['display_name']['value'])
+
+ if self.config.config_options['description']['value'] is not None:
+ self.project.update_annotation('description', self.config.config_options['description']['value'])
# work around for immutable project field
- if self.config.config_options['node_selector']['value']:
+ if self.config.config_options['node_selector']['value'] is not None:
self.project.update_annotation('node-selector', self.config.config_options['node_selector']['value'])
- else:
- self.project.update_annotation('node-selector', self.project.find_annotation('node-selector'))
return self._replace_content(self.kind, self.config.name, self.project.yaml_dict)
def needs_update(self):
''' verify an update is needed '''
- result = self.project.find_annotation("display-name")
- if result != self.config.config_options['display_name']['value']:
- return True
+ if self.config.config_options['display_name']['value'] is not None:
+ result = self.project.find_annotation("display-name")
+ if result != self.config.config_options['display_name']['value']:
+ return True
- result = self.project.find_annotation("description")
- if result != self.config.config_options['description']['value']:
- return True
+ if self.config.config_options['description']['value'] is not None:
+ result = self.project.find_annotation("description")
+ if result != self.config.config_options['description']['value']:
+ return True
- result = self.project.find_annotation("node-selector")
- if result != self.config.config_options['node_selector']['value']:
- return True
+ if self.config.config_options['node_selector']['value'] is not None:
+ result = self.project.find_annotation("node-selector")
+ if result != self.config.config_options['node_selector']['value']:
+ return True
return False
@@ -1543,19 +1594,22 @@ class OCProject(OpenShiftCLI):
def run_ansible(params, check_mode):
'''run the idempotent ansible code'''
- _ns = None
+ node_selector = None
if params['node_selector'] is not None:
- _ns = ','.join(params['node_selector'])
-
- pconfig = ProjectConfig(params['name'],
- 'None',
- params['kubeconfig'],
- {'admin': {'value': params['admin'], 'include': True},
- 'admin_role': {'value': params['admin_role'], 'include': True},
- 'description': {'value': params['description'], 'include': True},
- 'display_name': {'value': params['display_name'], 'include': True},
- 'node_selector': {'value': _ns, 'include': True},
- })
+ node_selector = ','.join(params['node_selector'])
+
+ pconfig = ProjectConfig(
+ params['name'],
+ 'None',
+ params['kubeconfig'],
+ {
+ 'admin': {'value': params['admin'], 'include': True},
+ 'admin_role': {'value': params['admin_role'], 'include': True},
+ 'description': {'value': params['description'], 'include': True},
+ 'display_name': {'value': params['display_name'], 'include': True},
+ 'node_selector': {'value': node_selector, 'include': True},
+ },
+ )
oadm_project = OCProject(pconfig, verbose=params['debug'])
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
new file mode 100644
index 000000000..3e4601cc3
--- /dev/null
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -0,0 +1,1780 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/pvc -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_pvc
+short_description: Modify, and idempotently manage openshift persistent volume claims
+description:
+ - Modify openshift persistent volume claims programmatically.
+options:
+ state:
+ description:
+ - Supported states, present, absent, list
+ - present - will ensure object is created or updated to the value specified
+ - list - will return a pvc
+ - absent - will remove a pvc
+ required: False
+ default: present
+ choices: ["present", 'absent', 'list']
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+ default: str
+ aliases: []
+ volume_capacity:
+ description:
+ - The requested volume capacity
+ required: False
+ default: 1G
+ aliases: []
+ access_modes:
+ description:
+ - The access modes allowed for the pvc
+ - Expects a list
+ required: False
+ default: ReadWriteOnce
+ choices:
+ - ReadWriteOnce
+ - ReadOnlyMany
+ - ReadWriteMany
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: create a pvc
+ oc_pvc:
+ namespace: awesomeapp
+ name: dbstorage
+ access_modes:
+ - ReadWriteOnce
+ volume_capacity: 5G
+ register: pvcout
+'''
+
+# -*- -*- -*- End included fragment: doc/pvc -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+
+class YeditException(Exception): # pragma: no cover
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object): # pragma: no cover
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is not None:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ elif isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.safe_load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
+
+ return inc_value
+
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
+
+ if params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and state != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': state}
+
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
+ else:
+ rval = yamlfile.delete(params['key'])
+
+ if rval[0] and params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
+
+ elif state == 'present':
+ # check if content is different than what is in the file
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+
+ yamlfile.yaml_dict = content
+
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
+
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
+
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
+
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
+ yamlfile.write()
+
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
+
+ # no edits to make
+ if params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': state}
+
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+ return {'failed': True, 'msg': 'Unkown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, name=None, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, name=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are retuned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node scheduable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ cmd.append('--schedulable={}'.format(schedulable))
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ if grace_period:
+ cmd.append('--grace-period={}'.format(int(grace_period)))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout.decode(), stderr.decode()
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = [self.oc_binary]
+
+ if oadm:
+ cmds.append('adm')
+
+ cmds.extend(cmd)
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
+ cmds.extend(['-n', self.namespace])
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+ rval = {"returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object): # pragma: no cover
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+ Utils._write(tmpfile, open(inc_file).read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get openshift version in Openshift 3.2
+ # By default "oc version in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(user_def[key])
+ print(value)
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(user_values)
+ print(api_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+
+ return rval
+
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/pvc.py -*- -*- -*-
+
+
+# pylint: disable=too-many-instance-attributes
+class PersistentVolumeClaimConfig(object):
+ ''' Handle pvc options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ sname,
+ namespace,
+ kubeconfig,
+ access_modes=None,
+ vol_capacity='1G'):
+ ''' constructor for handling pvc options '''
+ self.kubeconfig = kubeconfig
+ self.name = sname
+ self.namespace = namespace
+ self.access_modes = access_modes
+ self.vol_capacity = vol_capacity
+ self.data = {}
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' return a service as a dict '''
+ # version
+ self.data['apiVersion'] = 'v1'
+ # kind
+ self.data['kind'] = 'PersistentVolumeClaim'
+ # metadata
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ # spec
+ self.data['spec'] = {}
+ self.data['spec']['accessModes'] = ['ReadWriteOnce']
+ if self.access_modes:
+ self.data['spec']['accessModes'] = self.access_modes
+
+ # storage capacity
+ self.data['spec']['resources'] = {}
+ self.data['spec']['resources']['requests'] = {}
+ self.data['spec']['resources']['requests']['storage'] = self.vol_capacity
+
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class PersistentVolumeClaim(Yedit):
+ ''' Class to wrap the oc command line tools '''
+ access_modes_path = "spec.accessModes"
+ volume_capacity_path = "spec.requests.storage"
+ volume_name_path = "spec.volumeName"
+ bound_path = "status.phase"
+ kind = 'PersistentVolumeClaim'
+
+ def __init__(self, content):
+ '''RoleBinding constructor'''
+ super(PersistentVolumeClaim, self).__init__(content=content)
+ self._access_modes = None
+ self._volume_capacity = None
+ self._volume_name = None
+
+ @property
+ def volume_name(self):
+ ''' volume_name property '''
+ if self._volume_name is None:
+ self._volume_name = self.get_volume_name()
+ return self._volume_name
+
+ @volume_name.setter
+ def volume_name(self, data):
+ ''' volume_name property setter'''
+ self._volume_name = data
+
+ @property
+ def access_modes(self):
+ ''' access_modes property '''
+ if self._access_modes is None:
+ self._access_modes = self.get_access_modes()
+ if not isinstance(self._access_modes, list):
+ self._access_modes = list(self._access_modes)
+
+ return self._access_modes
+
+ @access_modes.setter
+ def access_modes(self, data):
+ ''' access_modes property setter'''
+ if not isinstance(data, list):
+ data = list(data)
+
+ self._access_modes = data
+
+ @property
+ def volume_capacity(self):
+ ''' volume_capacity property '''
+ if self._volume_capacity is None:
+ self._volume_capacity = self.get_volume_capacity()
+ return self._volume_capacity
+
+ @volume_capacity.setter
+ def volume_capacity(self, data):
+ ''' volume_capacity property setter'''
+ self._volume_capacity = data
+
+ def get_access_modes(self):
+ '''get access_modes'''
+ return self.get(PersistentVolumeClaim.access_modes_path) or []
+
+ def get_volume_capacity(self):
+ '''get volume_capacity'''
+ return self.get(PersistentVolumeClaim.volume_capacity_path) or []
+
+ def get_volume_name(self):
+ '''get volume_name'''
+ return self.get(PersistentVolumeClaim.volume_name_path) or []
+
+ def is_bound(self):
+ '''return whether volume is bound'''
+ return self.get(PersistentVolumeClaim.bound_path) or []
+
+ #### ADD #####
+ def add_access_mode(self, inc_mode):
+ ''' add an access_mode'''
+ if self.access_modes:
+ self.access_modes.append(inc_mode)
+ else:
+ self.put(PersistentVolumeClaim.access_modes_path, [inc_mode])
+
+ return True
+
+ #### /ADD #####
+
+ #### Remove #####
+ def remove_access_mode(self, inc_mode):
+ ''' remove an access_mode'''
+ try:
+ self.access_modes.remove(inc_mode)
+ except ValueError as _:
+ return False
+
+ return True
+
+ #### /REMOVE #####
+
+ #### UPDATE #####
+ def update_access_mode(self, inc_mode):
+ ''' update an access_mode'''
+ try:
+ index = self.access_modes.index(inc_mode)
+ except ValueError as _:
+ return self.add_access_mode(inc_mode)
+
+ self.access_modes[index] = inc_mode
+
+ return True
+
+ #### /UPDATE #####
+
+ #### FIND ####
+ def find_access_mode(self, inc_mode):
+ ''' find a user '''
+ index = None
+ try:
+ index = self.access_modes.index(inc_mode)
+ except ValueError as _:
+ return index
+
+ return index
+
+# -*- -*- -*- End included fragment: lib/pvc.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_pvc.py -*- -*- -*-
+
+
+# pylint: disable=too-many-instance-attributes
+class OCPVC(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'pvc'
+
+ # pylint allows 5
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for OCVolume '''
+ super(OCPVC, self).__init__(config.namespace, config.kubeconfig)
+ self.config = config
+ self.namespace = config.namespace
+ self._pvc = None
+
+ @property
+ def pvc(self):
+ ''' property function pvc'''
+ if not self._pvc:
+ self.get()
+ return self._pvc
+
+ @pvc.setter
+ def pvc(self, data):
+ ''' setter function for yedit var '''
+ self._pvc = data
+
+ def bound(self):
+ '''return whether the pvc is bound'''
+ if self.pvc.get_volume_name():
+ return True
+
+ return False
+
+ def exists(self):
+ ''' return whether a pvc exists '''
+ if self.pvc:
+ return True
+
+ return False
+
+ def get(self):
+ '''return pvc information '''
+ result = self._get(self.kind, self.config.name)
+ if result['returncode'] == 0:
+ self.pvc = PersistentVolumeClaim(content=result['results'][0])
+ elif '\"%s\" not found' % self.config.name in result['stderr']:
+ result['returncode'] = 0
+ result['results'] = [{}]
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create the object'''
+ return self._create_from_content(self.config.name, self.config.data)
+
+ def update(self):
+ '''update the object'''
+ # need to update the tls information and the service name
+ return self._replace_content(self.kind, self.config.name, self.config.data)
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ if self.pvc.get_volume_name() or self.pvc.is_bound():
+ return False
+
+ skip = []
+ return not Utils.check_def_equal(self.config.data, self.pvc.yaml_dict, skip_keys=skip, debug=True)
+
+ # pylint: disable=too-many-branches,too-many-return-statements
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the idempotent ansible code'''
+ pconfig = PersistentVolumeClaimConfig(params['name'],
+ params['namespace'],
+ params['kubeconfig'],
+ params['access_modes'],
+ params['volume_capacity'],
+ )
+ oc_pvc = OCPVC(pconfig, verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_pvc.get()
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval['results'], 'state': state}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_pvc.exists():
+
+ if check_mode:
+ return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+ api_rval = oc_pvc.delete()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'state': state}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_pvc.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+ # Create it here
+ api_rval = oc_pvc.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_pvc.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if oc_pvc.pvc.is_bound() or oc_pvc.pvc.get_volume_name():
+ api_rval['msg'] = '##### - This volume is currently bound. Will not update - ####'
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ if oc_pvc.needs_update():
+ api_rval = oc_pvc.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_pvc.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
+
+# -*- -*- -*- End included fragment: class/oc_pvc.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_pvc.py -*- -*- -*-
+
+#pylint: disable=too-many-branches
+def main():
+ '''
+ ansible oc module for pvc
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, required=True, type='str'),
+ namespace=dict(default=None, required=True, type='str'),
+ volume_capacity=dict(default='1G', type='str'),
+ access_modes=dict(default='ReadWriteOnce',
+ choices=['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany'],
+ type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCPVC.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ return module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_pvc.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index 97dd310bc..755ab3b02 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -179,17 +179,15 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/route -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -214,13 +212,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -236,13 +234,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -264,7 +262,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -353,7 +351,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -453,7 +451,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -572,8 +570,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -634,7 +632,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -660,7 +668,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -692,114 +700,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
- if module.params['src']:
+ state = params['state']
+
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
- if rval[0] and module.params['src']:
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -900,11 +943,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -922,7 +969,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -939,13 +986,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -965,9 +1012,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -982,10 +1029,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -998,16 +1045,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1088,9 +1135,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1112,7 +1159,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 56e4e38f7..0c83338b0 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -123,17 +123,15 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/scale -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -158,13 +156,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -180,13 +178,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -208,7 +206,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -297,7 +295,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -397,7 +395,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -516,8 +514,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -578,7 +576,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -604,7 +612,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -636,114 +644,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
- if module.params['src']:
+ state = params['state']
+
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
- if rval[0] and module.params['src']:
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -844,11 +887,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -866,7 +913,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -883,13 +930,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -909,9 +956,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -926,10 +973,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -942,16 +989,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1032,9 +1079,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1056,7 +1103,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index ad32d4900..26e52a926 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -169,17 +169,15 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/secret -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -204,13 +202,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -226,13 +224,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -254,7 +252,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -343,7 +341,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -443,7 +441,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -562,8 +560,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -624,7 +622,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -650,7 +658,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -682,114 +690,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
- if module.params['src']:
+ state = params['state']
+
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
- if rval[0] and module.params['src']:
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -890,11 +933,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -912,7 +959,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -929,13 +976,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -955,9 +1002,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -972,10 +1019,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -988,16 +1035,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1078,9 +1125,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1102,7 +1149,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index a4d0ca3f3..440cda1b3 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -175,17 +175,15 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/service -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -210,13 +208,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -232,13 +230,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -260,7 +258,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -349,7 +347,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -449,7 +447,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -568,8 +566,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -630,7 +628,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -656,7 +664,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -688,114 +696,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
- if module.params['src']:
+ state = params['state']
+
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
- if rval[0] and module.params['src']:
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -896,11 +939,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -918,7 +965,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -935,13 +982,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -961,9 +1008,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -978,10 +1025,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -994,16 +1041,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1084,9 +1131,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1108,7 +1155,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index b6586fca9..5eb36ee32 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -121,17 +121,15 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/serviceaccount -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -156,13 +154,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -178,13 +176,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -206,7 +204,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -295,7 +293,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -395,7 +393,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -514,8 +512,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -576,7 +574,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -602,7 +610,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -634,114 +642,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
- if module.params['src']:
+ state = params['state']
+
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
- if rval[0] and module.params['src']:
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -842,11 +885,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -864,7 +911,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -881,13 +928,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -907,9 +954,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -924,10 +971,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -940,16 +987,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1030,9 +1077,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1054,7 +1101,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index 925a5a088..1bc788e87 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -121,17 +121,15 @@ EXAMPLES = '''
# -*- -*- -*- End included fragment: doc/serviceaccount_secret -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -156,13 +154,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -178,13 +176,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -206,7 +204,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -295,7 +293,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -395,7 +393,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -514,8 +512,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -576,7 +574,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -602,7 +610,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -634,114 +642,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
- if module.params['src']:
+ state = params['state']
+
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
- if rval[0] and module.params['src']:
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -842,11 +885,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -864,7 +911,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -881,13 +928,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -907,9 +954,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -924,10 +971,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -940,16 +987,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1030,9 +1077,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1054,7 +1101,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
new file mode 100644
index 000000000..3009e661a
--- /dev/null
+++ b/roles/lib_openshift/library/oc_user.py
@@ -0,0 +1,1761 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/user -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_user
+short_description: Create, modify, and idempotently manage openshift users.
+description:
+ - Modify openshift users programmatically.
+options:
+ state:
+ description:
+ - State controls the action that will be taken with resource
+ - 'present' will create or update a user to the desired state
+ - 'absent' will ensure user is removed
+ - 'list' will read and return a list of users
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ username:
+ description:
+ - Short username to query/modify.
+ required: false
+ default: None
+ aliases: []
+ full_name:
+ description:
+ - String with the full name/description of the user.
+ required: false
+ default: None
+ aliases: []
+ groups:
+ description:
+ - List of groups the user should be a member of. This does not add/update the legacy 'groups' field in the OpenShift user object, but makes user entries into the appropriate OpenShift group object for the given user.
+ required: false
+ default: []
+ aliases: []
+author:
+- "Joel Diaz <jdiaz@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: Ensure user exists
+ oc_user:
+ state: present
+ username: johndoe
+ full_name "John Doe"
+ groups:
+ - dedicated-admins
+ register: user_johndoe
+
+user_johndoe variable will have contents like:
+ok: [ded-int-aws-master-61034] => {
+ "user_johndoe": {
+ "changed": true,
+ "results": {
+ "cmd": "oc -n default get users johndoe -o json",
+ "results": [
+ {
+ "apiVersion": "v1",
+ "fullName": "John DOe",
+ "groups": null,
+ "identities": null,
+ "kind": "User",
+ "metadata": {
+ "creationTimestamp": "2017-02-28T15:09:21Z",
+ "name": "johndoe",
+ "resourceVersion": "848781",
+ "selfLink": "/oapi/v1/users/johndoe",
+ "uid": "e23d3300-fdc7-11e6-9e3e-12822d6b7656"
+ }
+ }
+ ],
+ "returncode": 0
+ },
+ "state": "present"
+ }
+}
+'groups' is empty because this field is the OpenShift user object's 'group' field.
+
+- name: Ensure user does not exist
+ oc_user:
+ state: absent
+ username: johndoe
+
+- name: List user's info
+ oc_user:
+ state: list
+ username: johndoe
+ register: user_johndoe
+
+user_johndoe will have contents similar to:
+ok: [ded-int-aws-master-61034] => {
+ "user_johndoe": {
+ "changed": false,
+ "results": [
+ {
+ "apiVersion": "v1",
+ "fullName": "John Doe",
+ "groups": null,
+ "identities": null,
+ "kind": "User",
+ "metadata": {
+ "creationTimestamp": "2017-02-28T15:04:44Z",
+ "name": "johndoe",
+ "resourceVersion": "848280",
+ "selfLink": "/oapi/v1/users/johndoe",
+ "uid": "3d479ad2-fdc7-11e6-9e3e-12822d6b7656"
+ }
+ }
+ ],
+ "state": "list"
+ }
+}
+'''
+
+# -*- -*- -*- End included fragment: doc/user -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+
+class YeditException(Exception): # pragma: no cover
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object): # pragma: no cover
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is not None:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ elif isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.safe_load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
+
+ return inc_value
+
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
+
+ if params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and state != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': state}
+
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
+ else:
+ rval = yamlfile.delete(params['key'])
+
+ if rval[0] and params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
+
+ elif state == 'present':
+ # check if content is different than what is in the file
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+
+ yamlfile.yaml_dict = content
+
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
+
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
+
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
+
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
+ yamlfile.write()
+
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
+
+ # no edits to make
+ if params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': state}
+
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+ return {'failed': True, 'msg': 'Unkown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, name=None, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, name=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are retuned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node scheduable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ cmd.append('--schedulable={}'.format(schedulable))
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ if grace_period:
+ cmd.append('--grace-period={}'.format(int(grace_period)))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout.decode(), stderr.decode()
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = [self.oc_binary]
+
+ if oadm:
+ cmds.append('adm')
+
+ cmds.extend(cmd)
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
+ cmds.extend(['-n', self.namespace])
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+ rval = {"returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object): # pragma: no cover
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+ Utils._write(tmpfile, open(inc_file).read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get openshift version in Openshift 3.2
+ # By default "oc version in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(user_def[key])
+ print(value)
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(user_values)
+ print(api_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+
+ return rval
+
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/user.py -*- -*- -*-
+
+
+class UserConfig(object):
+ ''' Handle user options '''
+ def __init__(self,
+ kubeconfig,
+ username,
+ full_name):
+ ''' constructor for handling user options '''
+ self.kubeconfig = kubeconfig
+ self.username = username
+ self.full_name = full_name
+
+ self.data = {}
+ self.create_dict()
+
+ def create_dict(self):
+ ''' return a user as a dict '''
+ self.data['apiVersion'] = 'v1'
+ self.data['fullName'] = self.full_name
+ self.data['groups'] = None
+ self.data['identities'] = None
+ self.data['kind'] = 'User'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.username
+
+
+# pylint: disable=too-many-instance-attributes
+class User(Yedit):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'user'
+
+ def __init__(self, content):
+ '''User constructor'''
+ super(User, self).__init__(content=content)
+
+# -*- -*- -*- End included fragment: lib/user.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_user.py -*- -*- -*-
+
+# pylint: disable=too-many-instance-attributes
+class OCUser(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'users'
+
+ def __init__(self,
+ config,
+ groups=None,
+ verbose=False):
+ ''' Constructor for OCUser '''
+ # namespace has no meaning for user operations, hardcode to 'default'
+ super(OCUser, self).__init__('default', config.kubeconfig)
+ self.config = config
+ self.groups = groups
+ self._user = None
+
+ @property
+ def user(self):
+ ''' property function user'''
+ if not self._user:
+ self.get()
+ return self._user
+
+ @user.setter
+ def user(self, data):
+ ''' setter function for user '''
+ self._user = data
+
+ def exists(self):
+ ''' return whether a user exists '''
+ if self.user:
+ return True
+
+ return False
+
+ def get(self):
+ ''' return user information '''
+ result = self._get(self.kind, self.config.username)
+ if result['returncode'] == 0:
+ self.user = User(content=result['results'][0])
+ elif 'users \"%s\" not found' % self.config.username in result['stderr']:
+ result['returncode'] = 0
+ result['results'] = [{}]
+
+ return result
+
+ def delete(self):
+ ''' delete the object '''
+ return self._delete(self.kind, self.config.username)
+
+ def create_group_entries(self):
+ ''' make entries for user to the provided group list '''
+ if self.groups != None:
+ for group in self.groups:
+ cmd = ['groups', 'add-users', group, self.config.username]
+ rval = self.openshift_cmd(cmd, oadm=True)
+ if rval['returncode'] != 0:
+ return rval
+
+ return rval
+
+ return {'returncode': 0}
+
+ def create(self):
+ ''' create the object '''
+ rval = self.create_group_entries()
+ if rval['returncode'] != 0:
+ return rval
+
+ return self._create_from_content(self.config.username, self.config.data)
+
+ def group_update(self):
+ ''' update group membership '''
+ rval = {'returncode': 0}
+ cmd = ['get', 'groups', '-o', 'json']
+ all_groups = self.openshift_cmd(cmd, output=True)
+
+ # pylint misindentifying all_groups['results']['items'] type
+ # pylint: disable=invalid-sequence-index
+ for group in all_groups['results']['items']:
+ # If we're supposed to be in this group
+ if group['metadata']['name'] in self.groups \
+ and (group['users'] is None or self.config.username not in group['users']):
+ cmd = ['groups', 'add-users', group['metadata']['name'],
+ self.config.username]
+ rval = self.openshift_cmd(cmd, oadm=True)
+ if rval['returncode'] != 0:
+ return rval
+ # else if we're in the group, but aren't supposed to be
+ elif group['users'] != None and self.config.username in group['users'] \
+ and group['metadata']['name'] not in self.groups:
+ cmd = ['groups', 'remove-users', group['metadata']['name'],
+ self.config.username]
+ rval = self.openshift_cmd(cmd, oadm=True)
+ if rval['returncode'] != 0:
+ return rval
+
+ return rval
+
+ def update(self):
+ ''' update the object '''
+ rval = self.group_update()
+ if rval['returncode'] != 0:
+ return rval
+
+ # need to update the user's info
+ return self._replace_content(self.kind, self.config.username, self.config.data, force=True)
+
+ def needs_group_update(self):
+ ''' check if there are group membership changes '''
+ cmd = ['get', 'groups', '-o', 'json']
+ all_groups = self.openshift_cmd(cmd, output=True)
+
+ # pylint misindentifying all_groups['results']['items'] type
+ # pylint: disable=invalid-sequence-index
+ for group in all_groups['results']['items']:
+ # If we're supposed to be in this group
+ if group['metadata']['name'] in self.groups \
+ and (group['users'] is None or self.config.username not in group['users']):
+ return True
+ # else if we're in the group, but aren't supposed to be
+ elif group['users'] != None and self.config.username in group['users'] \
+ and group['metadata']['name'] not in self.groups:
+ return True
+
+ return False
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ skip = []
+ if self.needs_group_update():
+ return True
+
+ return not Utils.check_def_equal(self.config.data, self.user.yaml_dict, skip_keys=skip, debug=True)
+
+ # pylint: disable=too-many-return-statements
+ @staticmethod
+ def run_ansible(params, check_mode=False):
+ ''' run the idempotent ansible code
+
+ params comes from the ansible portion of this module
+ check_mode: does the module support check mode. (module.check_mode)
+ '''
+
+ uconfig = UserConfig(params['kubeconfig'],
+ params['username'],
+ params['full_name'],
+ )
+
+ oc_user = OCUser(uconfig, params['groups'],
+ verbose=params['debug'])
+ state = params['state']
+
+ api_rval = oc_user.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval['results'], 'state': "list"}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_user.exists():
+
+ if check_mode:
+ return {'changed': False, 'msg': 'Would have performed a delete.'}
+
+ api_rval = oc_user.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': "absent"}
+ return {'changed': False, 'state': "absent"}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_user.exists():
+
+ if check_mode:
+ return {'changed': False, 'msg': 'Would have performed a create.'}
+
+ # Create it here
+ api_rval = oc_user.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_user.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': "present"}
+
+ ########
+ # Update
+ ########
+ if oc_user.needs_update():
+ api_rval = oc_user.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ orig_cmd = api_rval['cmd']
+ # return the created object
+ api_rval = oc_user.get()
+ # overwrite the get/list cmd
+ api_rval['cmd'] = orig_cmd
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': "present"}
+
+ return {'changed': False, 'results': api_rval, 'state': "present"}
+
+ return {'failed': True,
+ 'changed': False,
+ 'results': 'Unknown state passed. %s' % state,
+ 'state': "unknown"}
+
+# -*- -*- -*- End included fragment: class/oc_user.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_user.py -*- -*- -*-
+
+def main():
+ '''
+ ansible oc module for user
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ username=dict(default=None, type='str'),
+ full_name=dict(default=None, type='str'),
+ # setting groups for user data will not populate the
+ # 'groups' field in the user data.
+ # it will call out to the group data and make the user
+ # entry there
+ groups=dict(default=[], type='list'),
+ ),
+ supports_check_mode=True,
+ )
+
+ results = OCUser.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_user.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index 8f59d4d7e..88f295a74 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -93,17 +93,15 @@ oc_version:
# -*- -*- -*- End included fragment: doc/version -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
-class YeditException(Exception):
+class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
-class Yedit(object):
+class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
@@ -128,13 +126,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -150,13 +148,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -178,7 +176,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -267,7 +265,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -367,7 +365,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -486,8 +484,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -548,7 +546,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -574,7 +582,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -606,114 +614,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
- if module.params['src']:
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
- if rval[0] and module.params['src']:
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
@@ -814,11 +857,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -836,7 +883,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -853,13 +900,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -879,9 +926,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -896,10 +943,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -912,16 +959,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -1002,9 +1049,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
@@ -1026,7 +1073,7 @@ class OpenShiftCLI(object):
return rval
-class Utils(object):
+class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
new file mode 100644
index 000000000..5f936fb49
--- /dev/null
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -0,0 +1,2071 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/volume -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_volume
+short_description: Create, modify, and idempotently manage openshift volumes.
+description:
+ - Modify openshift volumes programmatically.
+options:
+ state:
+ description:
+ - State controls the action that will be taken with resource
+ - 'present' will create or update and object to the desired state
+ - 'absent' will ensure volumes are removed
+ - 'list' will read the volumes
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ namespace:
+ description:
+ - The name of the namespace where the object lives
+ required: false
+ default: default
+ aliases: []
+ kind:
+ description:
+ - The kind of object that can be managed.
+ default: dc
+ choices:
+ - dc
+ - rc
+ - pods
+ aliases: []
+ mount_type:
+ description:
+ - The type of volume to be used
+ required: false
+ default: None
+ choices:
+ - emptydir
+ - hostpath
+ - secret
+ - pvc
+ - configmap
+ aliases: []
+ mount_path:
+ description:
+ - The path to where the mount will be attached
+ required: false
+ default: None
+ aliases: []
+ secret_name:
+ description:
+ - The name of the secret. Used when mount_type is secret.
+ required: false
+ default: None
+ aliases: []
+ claim_size:
+ description:
+ - The size in GB of the pv claim. e.g. 100G
+ required: false
+ default: None
+ aliases: []
+ claim_name:
+ description:
+ - The name of the pv claim
+ required: false
+ default: None
+ aliases: []
+ configmap_name:
+ description:
+ - The name of the configmap
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: attach storage volumes to deploymentconfig
+ oc_volume:
+ namespace: logging
+ kind: dc
+ name: name_of_the_dc
+ mount_type: pvc
+ claim_name: loggingclaim
+ claim_size: 100G
+ vol_name: logging-storage
+ run_once: true
+'''
+
+# -*- -*- -*- End included fragment: doc/volume -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+
+class YeditException(Exception): # pragma: no cover
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object): # pragma: no cover
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is not None:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ elif isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.safe_load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
+
+ return inc_value
+
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
+
+ if params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and state != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': state}
+
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
+ else:
+ rval = yamlfile.delete(params['key'])
+
+ if rval[0] and params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
+
+ elif state == 'present':
+ # check if content is different than what is in the file
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+
+ yamlfile.yaml_dict = content
+
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
+
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
+
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
+
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
+ yamlfile.write()
+
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
+
+ # no edits to make
+ if params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': state}
+
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+ return {'failed': True, 'msg': 'Unkown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, name=None, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, name=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are retuned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node scheduable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ cmd.append('--schedulable={}'.format(schedulable))
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ if grace_period:
+ cmd.append('--grace-period={}'.format(int(grace_period)))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout.decode(), stderr.decode()
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = [self.oc_binary]
+
+ if oadm:
+ cmds.append('adm')
+
+ cmds.extend(cmd)
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
+ cmds.extend(['-n', self.namespace])
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+ rval = {"returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object): # pragma: no cover
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+ Utils._write(tmpfile, open(inc_file).read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get openshift version in Openshift 3.2
+ # By default "oc version in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(user_def[key])
+ print(value)
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(user_values)
+ print(api_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+
+ return rval
+
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*-
+
+
+# pylint: disable=too-many-public-methods
+class DeploymentConfig(Yedit):
+ ''' Class to model an openshift DeploymentConfig'''
+ default_deployment_config = '''
+apiVersion: v1
+kind: DeploymentConfig
+metadata:
+ name: default_dc
+ namespace: default
+spec:
+ replicas: 0
+ selector:
+ default_dc: default_dc
+ strategy:
+ resources: {}
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 0
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePercent: -25
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ spec:
+ containers:
+ - env:
+ - name: default
+ value: default
+ image: default
+ imagePullPolicy: IfNotPresent
+ name: default_dc
+ ports:
+ - containerPort: 8000
+ hostPort: 8000
+ protocol: TCP
+ name: default_port
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ dnsPolicy: ClusterFirst
+ hostNetwork: true
+ nodeSelector:
+ type: compute
+ restartPolicy: Always
+ securityContext: {}
+ serviceAccount: default
+ serviceAccountName: default
+ terminationGracePeriodSeconds: 30
+ triggers:
+ - type: ConfigChange
+'''
+
+ replicas_path = "spec.replicas"
+ env_path = "spec.template.spec.containers[0].env"
+ volumes_path = "spec.template.spec.volumes"
+ container_path = "spec.template.spec.containers"
+ volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"
+
+ def __init__(self, content=None):
+ ''' Constructor for deploymentconfig '''
+ if not content:
+ content = DeploymentConfig.default_deployment_config
+
+ super(DeploymentConfig, self).__init__(content=content)
+
+ def add_env_value(self, key, value):
+ ''' add key, value pair to env array '''
+ rval = False
+ env = self.get_env_vars()
+ if env:
+ env.append({'name': key, 'value': value})
+ rval = True
+ else:
+ result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value})
+ rval = result[0]
+
+ return rval
+
+ def exists_env_value(self, key, value):
+ ''' return whether a key, value pair exists '''
+ results = self.get_env_vars()
+ if not results:
+ return False
+
+ for result in results:
+ if result['name'] == key and result['value'] == value:
+ return True
+
+ return False
+
+ def exists_env_key(self, key):
+ ''' return whether a key, value pair exists '''
+ results = self.get_env_vars()
+ if not results:
+ return False
+
+ for result in results:
+ if result['name'] == key:
+ return True
+
+ return False
+
+ def get_env_var(self, key):
+ '''return a environment variables '''
+ results = self.get(DeploymentConfig.env_path) or []
+ if not results:
+ return None
+
+ for env_var in results:
+ if env_var['name'] == key:
+ return env_var
+
+ return None
+
+ def get_env_vars(self):
+ '''return a environment variables '''
+ return self.get(DeploymentConfig.env_path) or []
+
+ def delete_env_var(self, keys):
+ '''delete a list of keys '''
+ if not isinstance(keys, list):
+ keys = [keys]
+
+ env_vars_array = self.get_env_vars()
+ modified = False
+ idx = None
+ for key in keys:
+ for env_idx, env_var in enumerate(env_vars_array):
+ if env_var['name'] == key:
+ idx = env_idx
+ break
+
+ if idx:
+ modified = True
+ del env_vars_array[idx]
+
+ if modified:
+ return True
+
+ return False
+
+ def update_env_var(self, key, value):
+ '''place an env in the env var list'''
+
+ env_vars_array = self.get_env_vars()
+ idx = None
+ for env_idx, env_var in enumerate(env_vars_array):
+ if env_var['name'] == key:
+ idx = env_idx
+ break
+
+ if idx:
+ env_vars_array[idx]['value'] = value
+ else:
+ self.add_env_value(key, value)
+
+ return True
+
+ def exists_volume_mount(self, volume_mount):
+ ''' return whether a volume mount exists '''
+ exist_volume_mounts = self.get_volume_mounts()
+
+ if not exist_volume_mounts:
+ return False
+
+ volume_mount_found = False
+ for exist_volume_mount in exist_volume_mounts:
+ if exist_volume_mount['name'] == volume_mount['name']:
+ volume_mount_found = True
+ break
+
+ return volume_mount_found
+
+ def exists_volume(self, volume):
+ ''' return whether a volume exists '''
+ exist_volumes = self.get_volumes()
+
+ volume_found = False
+ for exist_volume in exist_volumes:
+ if exist_volume['name'] == volume['name']:
+ volume_found = True
+ break
+
+ return volume_found
+
+ def find_volume_by_name(self, volume, mounts=False):
+ ''' return the index of a volume '''
+ volumes = []
+ if mounts:
+ volumes = self.get_volume_mounts()
+ else:
+ volumes = self.get_volumes()
+ for exist_volume in volumes:
+ if exist_volume['name'] == volume['name']:
+ return exist_volume
+
+ return None
+
+ def get_replicas(self):
+ ''' return replicas setting '''
+ return self.get(DeploymentConfig.replicas_path)
+
+ def get_volume_mounts(self):
+ '''return volume mount information '''
+ return self.get_volumes(mounts=True)
+
+ def get_volumes(self, mounts=False):
+ '''return volume mount information '''
+ if mounts:
+ return self.get(DeploymentConfig.volume_mounts_path) or []
+
+ return self.get(DeploymentConfig.volumes_path) or []
+
+ def delete_volume_by_name(self, volume):
+ '''delete a volume '''
+ modified = False
+ exist_volume_mounts = self.get_volume_mounts()
+ exist_volumes = self.get_volumes()
+ del_idx = None
+ for idx, exist_volume in enumerate(exist_volumes):
+ if 'name' in exist_volume and exist_volume['name'] == volume['name']:
+ del_idx = idx
+ break
+
+ if del_idx != None:
+ del exist_volumes[del_idx]
+ modified = True
+
+ del_idx = None
+ for idx, exist_volume_mount in enumerate(exist_volume_mounts):
+ if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']:
+ del_idx = idx
+ break
+
+ if del_idx != None:
+ del exist_volume_mounts[idx]
+ modified = True
+
+ return modified
+
+ def add_volume_mount(self, volume_mount):
+ ''' add a volume or volume mount to the proper location '''
+ exist_volume_mounts = self.get_volume_mounts()
+
+ if not exist_volume_mounts and volume_mount:
+ self.put(DeploymentConfig.volume_mounts_path, [volume_mount])
+ else:
+ exist_volume_mounts.append(volume_mount)
+
+ def add_volume(self, volume):
+ ''' add a volume or volume mount to the proper location '''
+ exist_volumes = self.get_volumes()
+ if not volume:
+ return
+
+ if not exist_volumes:
+ self.put(DeploymentConfig.volumes_path, [volume])
+ else:
+ exist_volumes.append(volume)
+
+ def update_replicas(self, replicas):
+ ''' update replicas value '''
+ self.put(DeploymentConfig.replicas_path, replicas)
+
+ def update_volume(self, volume):
+ '''place an env in the env var list'''
+ exist_volumes = self.get_volumes()
+
+ if not volume:
+ return False
+
+ # update the volume
+ update_idx = None
+ for idx, exist_vol in enumerate(exist_volumes):
+ if exist_vol['name'] == volume['name']:
+ update_idx = idx
+ break
+
+ if update_idx != None:
+ exist_volumes[update_idx] = volume
+ else:
+ self.add_volume(volume)
+
+ return True
+
+ def update_volume_mount(self, volume_mount):
+ '''place an env in the env var list'''
+ modified = False
+
+ exist_volume_mounts = self.get_volume_mounts()
+
+ if not volume_mount:
+ return False
+
+ # update the volume mount
+ for exist_vol_mount in exist_volume_mounts:
+ if exist_vol_mount['name'] == volume_mount['name']:
+ if 'mountPath' in exist_vol_mount and \
+ str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']):
+ exist_vol_mount['mountPath'] = volume_mount['mountPath']
+ modified = True
+ break
+
+ if not modified:
+ self.add_volume_mount(volume_mount)
+ modified = True
+
+ return modified
+
+ def needs_update_volume(self, volume, volume_mount):
+ ''' verify a volume update is needed '''
+ exist_volume = self.find_volume_by_name(volume)
+ exist_volume_mount = self.find_volume_by_name(volume, mounts=True)
+ results = []
+ results.append(exist_volume['name'] == volume['name'])
+
+ if 'secret' in volume:
+ results.append('secret' in exist_volume)
+ results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName'])
+ results.append(exist_volume_mount['name'] == volume_mount['name'])
+ results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
+
+ elif 'emptyDir' in volume:
+ results.append(exist_volume_mount['name'] == volume['name'])
+ results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
+
+ elif 'persistentVolumeClaim' in volume:
+ pvc = 'persistentVolumeClaim'
+ results.append(pvc in exist_volume)
+ if results[-1]:
+ results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName'])
+
+ if 'claimSize' in volume[pvc]:
+ results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize'])
+
+ elif 'hostpath' in volume:
+ results.append('hostPath' in exist_volume)
+ results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath'])
+
+ return not all(results)
+
+ def needs_update_replicas(self, replicas):
+ ''' verify whether a replica update is needed '''
+ current_reps = self.get(DeploymentConfig.replicas_path)
+ return not current_reps == replicas
+
+# -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/volume.py -*- -*- -*-
+
+class Volume(object):
+ ''' Class to represent an openshift volume object'''
+ volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",
+ "dc": "spec.template.spec.containers[0].volumeMounts",
+ "rc": "spec.template.spec.containers[0].volumeMounts",
+ }
+ volumes_path = {"pod": "spec.volumes",
+ "dc": "spec.template.spec.volumes",
+ "rc": "spec.template.spec.volumes",
+ }
+
+ @staticmethod
+ def create_volume_structure(volume_info):
+ ''' return a properly structured volume '''
+ volume_mount = None
+ volume = {'name': volume_info['name']}
+ volume_type = volume_info['type'].lower()
+ if volume_type == 'secret':
+ volume['secret'] = {}
+ volume[volume_info['type']] = {'secretName': volume_info['secret_name']}
+ volume_mount = {'mountPath': volume_info['path'],
+ 'name': volume_info['name']}
+ elif volume_type == 'emptydir':
+ volume['emptyDir'] = {}
+ volume_mount = {'mountPath': volume_info['path'],
+ 'name': volume_info['name']}
+ elif volume_type == 'pvc' or volume_type == 'persistentvolumeclaim':
+ volume['persistentVolumeClaim'] = {}
+ volume['persistentVolumeClaim']['claimName'] = volume_info['claimName']
+ volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize']
+ elif volume_type == 'hostpath':
+ volume['hostPath'] = {}
+ volume['hostPath']['path'] = volume_info['path']
+ elif volume_type == 'configmap':
+ volume['configMap'] = {}
+ volume['configMap']['name'] = volume_info['configmap_name']
+ volume_mount = {'mountPath': volume_info['path'],
+ 'name': volume_info['name']}
+
+ return (volume, volume_mount)
+
+# -*- -*- -*- End included fragment: lib/volume.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_volume.py -*- -*- -*-
+
+
+# pylint: disable=too-many-instance-attributes
+class OCVolume(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",
+ "dc": "spec.template.spec.containers[0].volumeMounts",
+ "rc": "spec.template.spec.containers[0].volumeMounts",
+ }
+ volumes_path = {"pod": "spec.volumes",
+ "dc": "spec.template.spec.volumes",
+ "rc": "spec.template.spec.volumes",
+ }
+
+ # pylint allows 5
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ kind,
+ resource_name,
+ namespace,
+ vol_name,
+ mount_path,
+ mount_type,
+ secret_name,
+ claim_size,
+ claim_name,
+ configmap_name,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False):
+ ''' Constructor for OCVolume '''
+ super(OCVolume, self).__init__(namespace, kubeconfig)
+ self.kind = kind
+ self.volume_info = {'name': vol_name,
+ 'secret_name': secret_name,
+ 'path': mount_path,
+ 'type': mount_type,
+ 'claimSize': claim_size,
+ 'claimName': claim_name,
+ 'configmap_name': configmap_name}
+ self.volume, self.volume_mount = Volume.create_volume_structure(self.volume_info)
+ self.name = resource_name
+ self.namespace = namespace
+ self.kubeconfig = kubeconfig
+ self.verbose = verbose
+ self._resource = None
+
+ @property
+ def resource(self):
+ ''' property function for resource var '''
+ if not self._resource:
+ self.get()
+ return self._resource
+
+ @resource.setter
+ def resource(self, data):
+ ''' setter function for resource var '''
+ self._resource = data
+
+ def exists(self):
+ ''' return whether a volume exists '''
+ volume_mount_found = False
+ volume_found = self.resource.exists_volume(self.volume)
+ if not self.volume_mount and volume_found:
+ return True
+
+ if self.volume_mount:
+ volume_mount_found = self.resource.exists_volume_mount(self.volume_mount)
+
+ if volume_found and self.volume_mount and volume_mount_found:
+ return True
+
+ return False
+
+ def get(self):
+ '''return volume information '''
+ vol = self._get(self.kind, self.name)
+ if vol['returncode'] == 0:
+ if self.kind == 'dc':
+ self.resource = DeploymentConfig(content=vol['results'][0])
+ vol['results'] = self.resource.get_volumes()
+
+ return vol
+
+ def delete(self):
+ '''remove a volume'''
+ self.resource.delete_volume_by_name(self.volume)
+ return self._replace_content(self.kind, self.name, self.resource.yaml_dict)
+
+ def put(self):
+ '''place volume into dc '''
+ self.resource.update_volume(self.volume)
+ self.resource.get_volumes()
+ self.resource.update_volume_mount(self.volume_mount)
+ return self._replace_content(self.kind, self.name, self.resource.yaml_dict)
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ return self.resource.needs_update_volume(self.volume, self.volume_mount)
+
+ # pylint: disable=too-many-branches,too-many-return-statements
+ @staticmethod
+ def run_ansible(params, check_mode=False):
+ '''run the idempotent ansible code'''
+ oc_volume = OCVolume(params['kind'],
+ params['name'],
+ params['namespace'],
+ params['vol_name'],
+ params['mount_path'],
+ params['mount_type'],
+ # secrets
+ params['secret_name'],
+ # pvc
+ params['claim_size'],
+ params['claim_name'],
+ # configmap
+ params['configmap_name'],
+ kubeconfig=params['kubeconfig'],
+ verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_volume.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval['results'], 'state': state}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_volume.exists():
+
+ if check_mode:
+ return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+ api_rval = oc_volume.delete()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'state': state}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_volume.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+ # Create it here
+ api_rval = oc_volume.put()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_volume.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if oc_volume.needs_update():
+ api_rval = oc_volume.put()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_volume.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, state: state}
+
+ return {'changed': False, 'results': api_rval, state: state}
+
+ return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
+
+# -*- -*- -*- End included fragment: class/oc_volume.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_volume.py -*- -*- -*-
+
+def main():
+ '''
+ ansible oc module for volumes
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ kind=dict(default='dc', choices=['dc', 'rc', 'pods'], type='str'),
+ namespace=dict(default='default', type='str'),
+ vol_name=dict(default=None, type='str'),
+ name=dict(default=None, type='str'),
+ mount_type=dict(default=None,
+ choices=['emptydir', 'hostpath', 'secret', 'pvc', 'configmap'],
+ type='str'),
+ mount_path=dict(default=None, type='str'),
+ # secrets require a name
+ secret_name=dict(default=None, type='str'),
+ # pvc requires a size
+ claim_size=dict(default=None, type='str'),
+ claim_name=dict(default=None, type='str'),
+ # configmap requires a name
+ configmap_name=dict(default=None, type='str'),
+ ),
+ supports_check_mode=True,
+ )
+ rval = OCVolume.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_volume.py -*- -*- -*-
diff --git a/roles/lib_openshift/src/ansible/oc_adm_ca_server_cert.py b/roles/lib_openshift/src/ansible/oc_adm_ca_server_cert.py
index c80c2eb44..10f1c9b4b 100644
--- a/roles/lib_openshift/src/ansible/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/src/ansible/oc_adm_ca_server_cert.py
@@ -20,6 +20,7 @@ def main():
signer_key=dict(default='/etc/origin/master/ca.key', type='str'),
signer_serial=dict(default='/etc/origin/master/ca.serial.txt', type='str'),
hostnames=dict(default=[], type='list'),
+ expire_days=dict(default=None, type='int'),
),
supports_check_mode=True,
)
diff --git a/roles/lib_openshift/src/ansible/oadm_manage_node.py b/roles/lib_openshift/src/ansible/oc_adm_manage_node.py
index b870c1211..b870c1211 100644
--- a/roles/lib_openshift/src/ansible/oadm_manage_node.py
+++ b/roles/lib_openshift/src/ansible/oc_adm_manage_node.py
diff --git a/roles/lib_openshift/src/ansible/oc_clusterrole.py b/roles/lib_openshift/src/ansible/oc_clusterrole.py
new file mode 100644
index 000000000..7e4319d2c
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_clusterrole.py
@@ -0,0 +1,29 @@
+# pylint: skip-file
+# flake8: noqa
+
+def main():
+ '''
+ ansible oc module for clusterrole
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ rules=dict(default=None, type='list'),
+ ),
+ supports_check_mode=True,
+ )
+
+ results = OCClusterRole.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_configmap.py b/roles/lib_openshift/src/ansible/oc_configmap.py
new file mode 100644
index 000000000..974f72499
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_configmap.py
@@ -0,0 +1,32 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+def main():
+ '''
+ ansible oc module for managing OpenShift configmap objects
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ name=dict(default=None, required=True, type='str'),
+ from_file=dict(default=None, type='dict'),
+ from_literal=dict(default=None, type='dict'),
+ ),
+ supports_check_mode=True,
+ )
+
+
+ rval = OCConfigMap.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_group.py b/roles/lib_openshift/src/ansible/oc_group.py
new file mode 100644
index 000000000..9294286d6
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_group.py
@@ -0,0 +1,32 @@
+# pylint: skip-file
+# flake8: noqa
+
+#pylint: disable=too-many-branches
+def main():
+ '''
+ ansible oc module for group
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ namespace=dict(default='default', type='str'),
+ # addind users to a group is handled through the oc_users module
+ #users=dict(default=None, type='list'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCGroup.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in rval:
+ return module.fail_json(**rval)
+
+ return module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_image.py b/roles/lib_openshift/src/ansible/oc_image.py
new file mode 100644
index 000000000..447d62f20
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_image.py
@@ -0,0 +1,34 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+def main():
+ '''
+ ansible oc module for image import
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'list']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ registry_url=dict(default=None, type='str'),
+ image_name=dict(default=None, required=True, type='str'),
+ image_tag=dict(default=None, type='str'),
+ force=dict(default=False, type='bool'),
+ ),
+
+ supports_check_mode=True,
+ )
+
+ rval = OCImage.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_obj.py b/roles/lib_openshift/src/ansible/oc_obj.py
index 701740e4f..6ab53d044 100644
--- a/roles/lib_openshift/src/ansible/oc_obj.py
+++ b/roles/lib_openshift/src/ansible/oc_obj.py
@@ -23,7 +23,7 @@ def main():
force=dict(default=False, type='bool'),
selector=dict(default=None, type='str'),
),
- mutually_exclusive=[["content", "files"]],
+ mutually_exclusive=[["content", "files"], ["selector", "name"]],
supports_check_mode=True,
)
diff --git a/roles/lib_openshift/src/ansible/oc_pvc.py b/roles/lib_openshift/src/ansible/oc_pvc.py
new file mode 100644
index 000000000..a5181e281
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_pvc.py
@@ -0,0 +1,35 @@
+# pylint: skip-file
+# flake8: noqa
+
+#pylint: disable=too-many-branches
+def main():
+ '''
+ ansible oc module for pvc
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, required=True, type='str'),
+ namespace=dict(default=None, required=True, type='str'),
+ volume_capacity=dict(default='1G', type='str'),
+ access_modes=dict(default='ReadWriteOnce',
+ choices=['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany'],
+ type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCPVC.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ return module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_user.py b/roles/lib_openshift/src/ansible/oc_user.py
new file mode 100644
index 000000000..6b1440796
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_user.py
@@ -0,0 +1,34 @@
+# pylint: skip-file
+# flake8: noqa
+
+def main():
+ '''
+ ansible oc module for user
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ username=dict(default=None, type='str'),
+ full_name=dict(default=None, type='str'),
+ # setting groups for user data will not populate the
+ # 'groups' field in the user data.
+ # it will call out to the group data and make the user
+ # entry there
+ groups=dict(default=[], type='list'),
+ ),
+ supports_check_mode=True,
+ )
+
+ results = OCUser.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_volume.py b/roles/lib_openshift/src/ansible/oc_volume.py
new file mode 100644
index 000000000..660376d2f
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_volume.py
@@ -0,0 +1,41 @@
+# pylint: skip-file
+# flake8: noqa
+
+def main():
+ '''
+ ansible oc module for volumes
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ kind=dict(default='dc', choices=['dc', 'rc', 'pods'], type='str'),
+ namespace=dict(default='default', type='str'),
+ vol_name=dict(default=None, type='str'),
+ name=dict(default=None, type='str'),
+ mount_type=dict(default=None,
+ choices=['emptydir', 'hostpath', 'secret', 'pvc', 'configmap'],
+ type='str'),
+ mount_path=dict(default=None, type='str'),
+ # secrets require a name
+ secret_name=dict(default=None, type='str'),
+ # pvc requires a size
+ claim_size=dict(default=None, type='str'),
+ claim_name=dict(default=None, type='str'),
+ # configmap requires a name
+ configmap_name=dict(default=None, type='str'),
+ ),
+ supports_check_mode=True,
+ )
+ rval = OCVolume.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py b/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
index 18c69f2fa..f954f40ef 100644
--- a/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
@@ -78,6 +78,9 @@ class CAServerCert(OpenShiftCLI):
if proc.returncode == 0:
regex = re.compile(r"^\s*X509v3 Subject Alternative Name:\s*?\n\s*(.*)\s*\n", re.MULTILINE)
match = regex.search(x509output) # E501
+ if not match:
+ return False
+
for entry in re.split(r", *", match.group(1)):
if entry.startswith('DNS') or entry.startswith('IP Address'):
cert_names.append(entry.split(':')[1])
@@ -102,6 +105,7 @@ class CAServerCert(OpenShiftCLI):
'signer_cert': {'value': params['signer_cert'], 'include': True},
'signer_key': {'value': params['signer_key'], 'include': True},
'signer_serial': {'value': params['signer_serial'], 'include': True},
+ 'expire_days': {'value': params['expire_days'], 'include': True},
'backup': {'value': params['backup'], 'include': False},
})
@@ -123,7 +127,7 @@ class CAServerCert(OpenShiftCLI):
api_rval = server_cert.create()
if api_rval['returncode'] != 0:
- return {'Failed': True, 'msg': api_rval}
+ return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
diff --git a/roles/lib_openshift/src/class/oadm_manage_node.py b/roles/lib_openshift/src/class/oc_adm_manage_node.py
index c07320477..6d9f24baa 100644
--- a/roles/lib_openshift/src/class/oadm_manage_node.py
+++ b/roles/lib_openshift/src/class/oc_adm_manage_node.py
@@ -44,7 +44,7 @@ class ManageNode(OpenShiftCLI):
if selector:
_sel = selector
- results = self._get('node', rname=_node, selector=_sel)
+ results = self._get('node', name=_node, selector=_sel)
if results['returncode'] != 0:
return results
diff --git a/roles/lib_openshift/src/class/oc_adm_policy_group.py b/roles/lib_openshift/src/class/oc_adm_policy_group.py
index afb066c77..1e51913e0 100644
--- a/roles/lib_openshift/src/class/oc_adm_policy_group.py
+++ b/roles/lib_openshift/src/class/oc_adm_policy_group.py
@@ -41,6 +41,28 @@ class PolicyGroup(OpenShiftCLI):
self.verbose = verbose
self._rolebinding = None
self._scc = None
+ self._cluster_policy_bindings = None
+ self._policy_bindings = None
+
+ @property
+ def policybindings(self):
+ if self._policy_bindings is None:
+ results = self._get('clusterpolicybindings', None)
+ if results['returncode'] != 0:
+ raise OpenShiftCLIError('Could not retrieve policybindings')
+ self._policy_bindings = results['results'][0]['items'][0]
+
+ return self._policy_bindings
+
+ @property
+ def clusterpolicybindings(self):
+ if self._cluster_policy_bindings is None:
+ results = self._get('clusterpolicybindings', None)
+ if results['returncode'] != 0:
+ raise OpenShiftCLIError('Could not retrieve clusterpolicybindings')
+ self._cluster_policy_bindings = results['results'][0]['items'][0]
+
+ return self._cluster_policy_bindings
@property
def role_binding(self):
@@ -81,18 +103,24 @@ class PolicyGroup(OpenShiftCLI):
def exists_role_binding(self):
''' return whether role_binding exists '''
- results = self.get()
- if results['returncode'] == 0:
- self.role_binding = RoleBinding(results['results'][0])
- if self.role_binding.find_group_name(self.config.config_options['group']['value']) != None:
- return True
+ bindings = None
+ if self.config.config_options['resource_kind']['value'] == 'cluster-role':
+ bindings = self.clusterpolicybindings
+ else:
+ bindings = self.policybindings
+ if bindings is None:
return False
- elif self.config.config_options['name']['value'] in results['stderr'] and '" not found' in results['stderr']:
- return False
+ for binding in bindings['roleBindings']:
+ _rb = binding['roleBinding']
+ if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \
+ _rb['groupNames'] is not None and \
+ self.config.config_options['group']['value'] in _rb['groupNames']:
+ self.role_binding = binding
+ return True
- return results
+ return False
def exists_scc(self):
''' return whether scc exists '''
diff --git a/roles/lib_openshift/src/class/oc_adm_policy_user.py b/roles/lib_openshift/src/class/oc_adm_policy_user.py
index c9d53acfa..88fcc1ddc 100644
--- a/roles/lib_openshift/src/class/oc_adm_policy_user.py
+++ b/roles/lib_openshift/src/class/oc_adm_policy_user.py
@@ -40,6 +40,28 @@ class PolicyUser(OpenShiftCLI):
self.verbose = verbose
self._rolebinding = None
self._scc = None
+ self._cluster_policy_bindings = None
+ self._policy_bindings = None
+
+ @property
+ def policybindings(self):
+ if self._policy_bindings is None:
+ results = self._get('clusterpolicybindings', None)
+ if results['returncode'] != 0:
+ raise OpenShiftCLIError('Could not retrieve policybindings')
+ self._policy_bindings = results['results'][0]['items'][0]
+
+ return self._policy_bindings
+
+ @property
+ def clusterpolicybindings(self):
+ if self._cluster_policy_bindings is None:
+ results = self._get('clusterpolicybindings', None)
+ if results['returncode'] != 0:
+ raise OpenShiftCLIError('Could not retrieve clusterpolicybindings')
+ self._cluster_policy_bindings = results['results'][0]['items'][0]
+
+ return self._cluster_policy_bindings
@property
def role_binding(self):
@@ -62,36 +84,37 @@ class PolicyUser(OpenShiftCLI):
self._scc = scc
def get(self):
- '''fetch the desired kind'''
+ '''fetch the desired kind
+
+ This is only used for scc objects.
+ The {cluster}rolebindings happen in exists.
+ '''
resource_name = self.config.config_options['name']['value']
if resource_name == 'cluster-reader':
resource_name += 's'
- # oc adm policy add-... creates policy bindings with the name
- # "[resource_name]-binding", however some bindings in the system
- # simply use "[resource_name]". So try both.
-
- results = self._get(self.config.kind, resource_name)
- if results['returncode'] == 0:
- return results
-
- # Now try -binding naming convention
- return self._get(self.config.kind, resource_name + "-binding")
+ return self._get(self.config.kind, resource_name)
def exists_role_binding(self):
''' return whether role_binding exists '''
- results = self.get()
- if results['returncode'] == 0:
- self.role_binding = RoleBinding(results['results'][0])
- if self.role_binding.find_user_name(self.config.config_options['user']['value']) != None:
- return True
+ bindings = None
+ if self.config.config_options['resource_kind']['value'] == 'cluster-role':
+ bindings = self.clusterpolicybindings
+ else:
+ bindings = self.policybindings
+ if bindings is None:
return False
- elif self.config.config_options['name']['value'] in results['stderr'] and '" not found' in results['stderr']:
- return False
+ for binding in bindings['roleBindings']:
+ _rb = binding['roleBinding']
+ if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \
+ _rb['userNames'] is not None and \
+ self.config.config_options['user']['value'] in _rb['userNames']:
+ self.role_binding = binding
+ return True
- return results
+ return False
def exists_scc(self):
''' return whether scc exists '''
diff --git a/roles/lib_openshift/src/class/oc_adm_registry.py b/roles/lib_openshift/src/class/oc_adm_registry.py
index c083cd179..720b44cdc 100644
--- a/roles/lib_openshift/src/class/oc_adm_registry.py
+++ b/roles/lib_openshift/src/class/oc_adm_registry.py
@@ -87,8 +87,8 @@ class Registry(OpenShiftCLI):
''' prepared_registry property '''
if not self.__prepared_registry:
results = self.prepare_registry()
- if not results:
- raise RegistryException('Could not perform registry preparation.')
+ if not results or ('returncode' in results and results['returncode'] != 0):
+ raise RegistryException('Could not perform registry preparation. {}'.format(results))
self.__prepared_registry = results
return self.__prepared_registry
@@ -105,7 +105,7 @@ class Registry(OpenShiftCLI):
rval = 0
for part in self.registry_parts:
- result = self._get(part['kind'], rname=part['name'])
+ result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
@@ -153,8 +153,8 @@ class Registry(OpenShiftCLI):
# probably need to parse this
# pylint thinks results is a string
# pylint: disable=no-member
- if results['returncode'] != 0 and 'items' in results['results']:
- return results
+ if results['returncode'] != 0 and 'items' not in results['results']:
+ raise RegistryException('Could not perform registry preparation. {}'.format(results))
service = None
deploymentconfig = None
diff --git a/roles/lib_openshift/src/class/oc_adm_router.py b/roles/lib_openshift/src/class/oc_adm_router.py
index 356d06fdf..1a0b94b80 100644
--- a/roles/lib_openshift/src/class/oc_adm_router.py
+++ b/roles/lib_openshift/src/class/oc_adm_router.py
@@ -136,7 +136,7 @@ class Router(OpenShiftCLI):
self.secret = None
self.rolebinding = None
for part in self.router_parts:
- result = self._get(part['kind'], rname=part['name'])
+ result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
diff --git a/roles/lib_openshift/src/class/oc_clusterrole.py b/roles/lib_openshift/src/class/oc_clusterrole.py
new file mode 100644
index 000000000..1d3d977db
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_clusterrole.py
@@ -0,0 +1,163 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-instance-attributes
+class OCClusterRole(OpenShiftCLI):
+ ''' Class to manage clusterrole objects'''
+ kind = 'clusterrole'
+
+ def __init__(self,
+ name,
+ rules=None,
+ kubeconfig=None,
+ verbose=False):
+ ''' Constructor for OCClusterRole '''
+ super(OCClusterRole, self).__init__(None, kubeconfig=kubeconfig, verbose=verbose)
+ self.verbose = verbose
+ self.name = name
+ self._clusterrole = None
+ self._inc_clusterrole = ClusterRole.builder(name, rules)
+
+ @property
+ def clusterrole(self):
+ ''' property for clusterrole'''
+ if not self._clusterrole:
+ self.get()
+ return self._clusterrole
+
+ @clusterrole.setter
+ def clusterrole(self, data):
+ ''' setter function for clusterrole property'''
+ self._clusterrole = data
+
+ @property
+ def inc_clusterrole(self):
+ ''' property for inc_clusterrole'''
+ return self._inc_clusterrole
+
+ @inc_clusterrole.setter
+ def inc_clusterrole(self, data):
+ ''' setter function for inc_clusterrole property'''
+ self._inc_clusterrole = data
+
+ def exists(self):
+ ''' return whether a clusterrole exists '''
+ if self.clusterrole:
+ return True
+
+ return False
+
+ def get(self):
+ '''return a clusterrole '''
+ result = self._get(self.kind, self.name)
+
+ if result['returncode'] == 0:
+ self.clusterrole = ClusterRole(content=result['results'][0])
+ result['results'] = self.clusterrole.yaml_dict
+
+ elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']:
+ result['returncode'] = 0
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.name)
+
+ def create(self):
+ '''create a clusterrole from the proposed incoming clusterrole'''
+ return self._create_from_content(self.name, self.inc_clusterrole.yaml_dict)
+
+ def update(self):
+ '''update a project'''
+ return self._replace_content(self.kind, self.name, self.inc_clusterrole.yaml_dict)
+
+ def needs_update(self):
+ ''' verify an update is needed'''
+ return not self.clusterrole.compare(self.inc_clusterrole, self.verbose)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the idempotent ansible code'''
+
+ oc_clusterrole = OCClusterRole(params['name'],
+ params['rules'],
+ params['kubeconfig'],
+ params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_clusterrole.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_clusterrole.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+ api_rval = oc_clusterrole.delete()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'state': state}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_clusterrole.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+ # Create it here
+ api_rval = oc_clusterrole.create()
+
+ # return the created object
+ api_rval = oc_clusterrole.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if oc_clusterrole.needs_update():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
+
+ api_rval = oc_clusterrole.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_clusterrole.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ return {'failed': True,
+ 'changed': False,
+ 'msg': 'Unknown state passed. [%s]' % state}
diff --git a/roles/lib_openshift/src/class/oc_configmap.py b/roles/lib_openshift/src/class/oc_configmap.py
new file mode 100644
index 000000000..de77d1102
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_configmap.py
@@ -0,0 +1,191 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-arguments
+class OCConfigMap(OpenShiftCLI):
+ ''' Openshift ConfigMap Class
+
+ ConfigMaps are a way to store data inside of objects
+ '''
+ def __init__(self,
+ name,
+ from_file,
+ from_literal,
+ state,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False):
+ ''' Constructor for OpenshiftOC '''
+ super(OCConfigMap, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
+ self.name = name
+ self.state = state
+ self._configmap = None
+ self._inc_configmap = None
+ self.from_file = from_file if from_file is not None else {}
+ self.from_literal = from_literal if from_literal is not None else {}
+
+ @property
+ def configmap(self):
+ if self._configmap is None:
+ self._configmap = self.get()
+
+ return self._configmap
+
+ @configmap.setter
+ def configmap(self, inc_map):
+ self._configmap = inc_map
+
+ @property
+ def inc_configmap(self):
+ if self._inc_configmap is None:
+ results = self.create(dryrun=True, output=True)
+ self._inc_configmap = results['results']
+
+ return self._inc_configmap
+
+ @inc_configmap.setter
+ def inc_configmap(self, inc_map):
+ self._inc_configmap = inc_map
+
+ def from_file_to_params(self):
+ '''return from_files in a string ready for cli'''
+ return ["--from-file={}={}".format(key, value) for key, value in self.from_file.items()]
+
+ def from_literal_to_params(self):
+ '''return from_literal in a string ready for cli'''
+ return ["--from-literal={}={}".format(key, value) for key, value in self.from_literal.items()]
+
+ def get(self):
+ '''return a configmap by name '''
+ results = self._get('configmap', self.name)
+ if results['returncode'] == 0 and results['results'][0]:
+ self.configmap = results['results'][0]
+
+ if results['returncode'] != 0 and '"{}" not found'.format(self.name) in results['stderr']:
+ results['returncode'] = 0
+
+ return results
+
+ def delete(self):
+ '''delete a configmap by name'''
+ return self._delete('configmap', self.name)
+
+ def create(self, dryrun=False, output=False):
+ '''Create a configmap
+
+ :dryrun: Product what you would have done. default: False
+ :output: Whether to parse output. default: False
+ '''
+
+ cmd = ['create', 'configmap', self.name]
+ if self.from_literal is not None:
+ cmd.extend(self.from_literal_to_params())
+
+ if self.from_file is not None:
+ cmd.extend(self.from_file_to_params())
+
+ if dryrun:
+ cmd.extend(['--dry-run', '-ojson'])
+
+ results = self.openshift_cmd(cmd, output=output)
+
+ return results
+
+ def update(self):
+ '''run update configmap '''
+ return self._replace_content('configmap', self.name, self.inc_configmap)
+
+ def needs_update(self):
+ '''compare the current configmap with the proposed and return if they are equal'''
+ return not Utils.check_def_equal(self.inc_configmap, self.configmap, debug=self.verbose)
+
+ @staticmethod
+ # pylint: disable=too-many-return-statements,too-many-branches
+ # TODO: This function should be refactored into its individual parts.
+ def run_ansible(params, check_mode):
+ '''run the ansible idempotent code'''
+
+ oc_cm = OCConfigMap(params['name'],
+ params['from_file'],
+ params['from_literal'],
+ params['state'],
+ params['namespace'],
+ kubeconfig=params['kubeconfig'],
+ verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_cm.get()
+
+ if 'failed' in api_rval:
+ return {'failed': True, 'msg': api_rval}
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ if not params['name']:
+ return {'failed': True,
+ 'msg': 'Please specify a name when state is absent|present.'}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not Utils.exists(api_rval['results'], params['name']):
+ return {'changed': False, 'state': 'absent'}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+ api_rval = oc_cm.delete()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Create
+ ########
+ if state == 'present':
+ if not Utils.exists(api_rval['results'], params['name']):
+
+ if check_mode:
+ return {'changed': True, 'msg': 'Would have performed a create.'}
+
+ api_rval = oc_cm.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ api_rval = oc_cm.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if oc_cm.needs_update():
+
+ api_rval = oc_cm.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ api_rval = oc_cm.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
diff --git a/roles/lib_openshift/src/class/oc_group.py b/roles/lib_openshift/src/class/oc_group.py
new file mode 100644
index 000000000..89fb09ea4
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_group.py
@@ -0,0 +1,148 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+class OCGroup(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'group'
+
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for OCGroup '''
+ super(OCGroup, self).__init__(config.namespace, config.kubeconfig)
+ self.config = config
+ self.namespace = config.namespace
+ self._group = None
+
+ @property
+ def group(self):
+ ''' property function service'''
+ if not self._group:
+ self.get()
+ return self._group
+
+ @group.setter
+ def group(self, data):
+ ''' setter function for yedit var '''
+ self._group = data
+
+ def exists(self):
+ ''' return whether a group exists '''
+ if self.group:
+ return True
+
+ return False
+
+ def get(self):
+ '''return group information '''
+ result = self._get(self.kind, self.config.name)
+ if result['returncode'] == 0:
+ self.group = Group(content=result['results'][0])
+ elif 'groups \"{}\" not found'.format(self.config.name) in result['stderr']:
+ result['returncode'] = 0
+ result['results'] = [{}]
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create the object'''
+ return self._create_from_content(self.config.name, self.config.data)
+
+ def update(self):
+ '''update the object'''
+ return self._replace_content(self.kind, self.config.name, self.config.data)
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=[], debug=True)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, check_mode=False):
+ '''run the idempotent ansible code'''
+
+ gconfig = GroupConfig(params['name'],
+ params['namespace'],
+ params['kubeconfig'],
+ )
+ oc_group = OCGroup(gconfig, verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_group.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval['results'], 'state': state}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_group.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+ api_rval = oc_group.delete()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'state': state}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_group.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+ # Create it here
+ api_rval = oc_group.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_group.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if oc_group.needs_update():
+ api_rval = oc_group.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_group.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
diff --git a/roles/lib_openshift/src/class/oc_image.py b/roles/lib_openshift/src/class/oc_image.py
new file mode 100644
index 000000000..d25349127
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_image.py
@@ -0,0 +1,91 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-arguments
+class OCImage(OpenShiftCLI):
+ ''' Class to import and create an imagestream object'''
+ def __init__(self,
+ namespace,
+ registry_url,
+ image_name,
+ image_tag,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False):
+ ''' Constructor for OCImage'''
+ super(OCImage, self).__init__(namespace, kubeconfig)
+ self.registry_url = registry_url
+ self.image_name = image_name
+ self.image_tag = image_tag
+ self.verbose = verbose
+
+ def get(self):
+ '''return a image by name '''
+ results = self._get('imagestream', self.image_name)
+ results['exists'] = False
+ if results['returncode'] == 0 and results['results'][0]:
+ results['exists'] = True
+
+ if results['returncode'] != 0 and '"{}" not found'.format(self.image_name) in results['stderr']:
+ results['returncode'] = 0
+
+ return results
+
+ def create(self, url=None, name=None, tag=None):
+ '''Create an image '''
+ return self._import_image(url, name, tag)
+
+
+ # pylint: disable=too-many-return-statements
+ @staticmethod
+ def run_ansible(params, check_mode):
+ ''' run the ansible idempotent code '''
+
+ ocimage = OCImage(params['namespace'],
+ params['registry_url'],
+ params['image_name'],
+ params['image_tag'],
+ kubeconfig=params['kubeconfig'],
+ verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = ocimage.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ if api_rval['returncode'] != 0:
+ return {"failed": True, "msg": api_rval}
+ return {"changed": False, "results": api_rval, "state": "list"}
+
+ ########
+ # Create
+ ########
+ if state == 'present':
+
+ if not Utils.exists(api_rval['results'], params['image_name']):
+
+ if check_mode:
+ return {"changed": False, "msg": 'CHECK_MODE: Would have performed a create'}
+
+ api_rval = ocimage.create(params['registry_url'],
+ params['image_name'],
+ params['image_tag'])
+
+ if api_rval['returncode'] != 0:
+ return {"failed": True, "msg": api_rval}
+
+ # return the newly created object
+ api_rval = ocimage.get()
+
+ if api_rval['returncode'] != 0:
+ return {"failed": True, "msg": api_rval}
+
+ return {"changed": True, "results": api_rval, "state": "present"}
+
+ # image exists, no change
+ return {"changed": False, "results": api_rval, "state": "present"}
+
+ return {"failed": True, "changed": False, "msg": "Unknown state passed. {0}".format(state)}
diff --git a/roles/lib_openshift/src/class/oc_label.py b/roles/lib_openshift/src/class/oc_label.py
index bd312c170..0a6895177 100644
--- a/roles/lib_openshift/src/class/oc_label.py
+++ b/roles/lib_openshift/src/class/oc_label.py
@@ -134,9 +134,9 @@ class OCLabel(OpenShiftCLI):
label_list = []
if self.name:
- result = self._get(resource=self.kind, rname=self.name, selector=self.selector)
+ result = self._get(resource=self.kind, name=self.name, selector=self.selector)
- if 'labels' in result['results'][0]['metadata']:
+ if result['results'][0] and 'labels' in result['results'][0]['metadata']:
label_list.append(result['results'][0]['metadata']['labels'])
else:
label_list.append({})
diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py
index 51d3ce996..667b98eac 100644
--- a/roles/lib_openshift/src/class/oc_obj.py
+++ b/roles/lib_openshift/src/class/oc_obj.py
@@ -10,7 +10,7 @@ class OCObject(OpenShiftCLI):
def __init__(self,
kind,
namespace,
- rname=None,
+ name=None,
selector=None,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
@@ -19,21 +19,21 @@ class OCObject(OpenShiftCLI):
super(OCObject, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose,
all_namespaces=all_namespaces)
self.kind = kind
- self.name = rname
+ self.name = name
self.selector = selector
def get(self):
'''return a kind by name '''
- results = self._get(self.kind, rname=self.name, selector=self.selector)
- if results['returncode'] != 0 and 'stderr' in results and \
- '\"%s\" not found' % self.name in results['stderr']:
+ results = self._get(self.kind, name=self.name, selector=self.selector)
+ if (results['returncode'] != 0 and 'stderr' in results and
+ '\"{}\" not found'.format(self.name) in results['stderr']):
results['returncode'] = 0
return results
def delete(self):
- '''return all pods '''
- return self._delete(self.kind, self.name)
+ '''delete the object'''
+ return self._delete(self.kind, name=self.name, selector=self.selector)
def create(self, files=None, content=None):
'''
@@ -109,24 +109,33 @@ class OCObject(OpenShiftCLI):
# Get
#####
if state == 'list':
- return {'changed': False, 'results': api_rval, 'state': 'list'}
-
- if not params['name']:
- return {'failed': True, 'msg': 'Please specify a name when state is absent|present.'} # noqa: E501
+ return {'changed': False, 'results': api_rval, 'state': state}
########
# Delete
########
if state == 'absent':
- if not Utils.exists(api_rval['results'], params['name']):
- return {'changed': False, 'state': 'absent'}
+ # if we were passed a name, verify its not in our results
+ if params['name'] is not None and not Utils.exists(api_rval['results'], params['name']):
+ return {'changed': False, 'state': state}
+
+ # verify results are empty for the selector
+ if params['selector'] is not None and len(api_rval['results']) == 0:
+ return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete'}
api_rval = ocobj.delete()
- return {'changed': True, 'results': api_rval, 'state': 'absent'}
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ # create/update: Must define a name beyond this point
+ if not params['name']:
+ return {'failed': True, 'msg': 'Please specify a name when state is present.'}
if state == 'present':
########
@@ -152,7 +161,7 @@ class OCObject(OpenShiftCLI):
if params['files'] and params['delete_after']:
Utils.cleanup(params['files'])
- return {'changed': True, 'results': api_rval, 'state': "present"}
+ return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
@@ -167,7 +176,7 @@ class OCObject(OpenShiftCLI):
if params['files'] and params['delete_after']:
Utils.cleanup(params['files'])
- return {'changed': False, 'results': api_rval['results'][0], 'state': "present"}
+ return {'changed': False, 'results': api_rval['results'][0], 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
@@ -186,4 +195,4 @@ class OCObject(OpenShiftCLI):
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
- return {'changed': True, 'results': api_rval, 'state': "present"}
+ return {'changed': True, 'results': api_rval, 'state': state}
diff --git a/roles/lib_openshift/src/class/oc_process.py b/roles/lib_openshift/src/class/oc_process.py
index 9d29938aa..eba9a43cd 100644
--- a/roles/lib_openshift/src/class/oc_process.py
+++ b/roles/lib_openshift/src/class/oc_process.py
@@ -136,7 +136,7 @@ class OCProcess(OpenShiftCLI):
if api_rval['returncode'] != 0:
return {"failed": True, "msg" : api_rval}
- return {"changed" : False, "results": api_rval, "state": "list"}
+ return {"changed" : False, "results": api_rval, "state": state}
elif state == 'present':
if check_mode and params['create']:
@@ -158,9 +158,9 @@ class OCProcess(OpenShiftCLI):
return {"failed": True, "msg": api_rval}
if params['create']:
- return {"changed": True, "results": api_rval, "state": "present"}
+ return {"changed": True, "results": api_rval, "state": state}
- return {"changed": False, "results": api_rval, "state": "present"}
+ return {"changed": False, "results": api_rval, "state": state}
# verify results
update = False
@@ -175,11 +175,11 @@ class OCProcess(OpenShiftCLI):
update = True
if not update:
- return {"changed": update, "results": api_rval, "state": "present"}
+ return {"changed": update, "results": api_rval, "state": state}
for cmd in rval:
if cmd['returncode'] != 0:
- return {"failed": True, "changed": update, "results": rval, "state": "present"}
+ return {"failed": True, "changed": update, "msg": rval, "state": state}
- return {"changed": update, "results": rval, "state": "present"}
+ return {"changed": update, "results": rval, "state": state}
diff --git a/roles/lib_openshift/src/class/oc_project.py b/roles/lib_openshift/src/class/oc_project.py
index 7e3984297..9ad8111a8 100644
--- a/roles/lib_openshift/src/class/oc_project.py
+++ b/roles/lib_openshift/src/class/oc_project.py
@@ -61,30 +61,34 @@ class OCProject(OpenShiftCLI):
def update(self):
'''update a project '''
- self.project.update_annotation('display-name', self.config.config_options['display_name']['value'])
- self.project.update_annotation('description', self.config.config_options['description']['value'])
+ if self.config.config_options['display_name']['value'] is not None:
+ self.project.update_annotation('display-name', self.config.config_options['display_name']['value'])
+
+ if self.config.config_options['description']['value'] is not None:
+ self.project.update_annotation('description', self.config.config_options['description']['value'])
# work around for immutable project field
- if self.config.config_options['node_selector']['value']:
+ if self.config.config_options['node_selector']['value'] is not None:
self.project.update_annotation('node-selector', self.config.config_options['node_selector']['value'])
- else:
- self.project.update_annotation('node-selector', self.project.find_annotation('node-selector'))
return self._replace_content(self.kind, self.config.name, self.project.yaml_dict)
def needs_update(self):
''' verify an update is needed '''
- result = self.project.find_annotation("display-name")
- if result != self.config.config_options['display_name']['value']:
- return True
+ if self.config.config_options['display_name']['value'] is not None:
+ result = self.project.find_annotation("display-name")
+ if result != self.config.config_options['display_name']['value']:
+ return True
- result = self.project.find_annotation("description")
- if result != self.config.config_options['description']['value']:
- return True
+ if self.config.config_options['description']['value'] is not None:
+ result = self.project.find_annotation("description")
+ if result != self.config.config_options['description']['value']:
+ return True
- result = self.project.find_annotation("node-selector")
- if result != self.config.config_options['node_selector']['value']:
- return True
+ if self.config.config_options['node_selector']['value'] is not None:
+ result = self.project.find_annotation("node-selector")
+ if result != self.config.config_options['node_selector']['value']:
+ return True
return False
@@ -93,19 +97,22 @@ class OCProject(OpenShiftCLI):
def run_ansible(params, check_mode):
'''run the idempotent ansible code'''
- _ns = None
+ node_selector = None
if params['node_selector'] is not None:
- _ns = ','.join(params['node_selector'])
-
- pconfig = ProjectConfig(params['name'],
- 'None',
- params['kubeconfig'],
- {'admin': {'value': params['admin'], 'include': True},
- 'admin_role': {'value': params['admin_role'], 'include': True},
- 'description': {'value': params['description'], 'include': True},
- 'display_name': {'value': params['display_name'], 'include': True},
- 'node_selector': {'value': _ns, 'include': True},
- })
+ node_selector = ','.join(params['node_selector'])
+
+ pconfig = ProjectConfig(
+ params['name'],
+ 'None',
+ params['kubeconfig'],
+ {
+ 'admin': {'value': params['admin'], 'include': True},
+ 'admin_role': {'value': params['admin_role'], 'include': True},
+ 'description': {'value': params['description'], 'include': True},
+ 'display_name': {'value': params['display_name'], 'include': True},
+ 'node_selector': {'value': node_selector, 'include': True},
+ },
+ )
oadm_project = OCProject(pconfig, verbose=params['debug'])
diff --git a/roles/lib_openshift/src/class/oc_pvc.py b/roles/lib_openshift/src/class/oc_pvc.py
new file mode 100644
index 000000000..c73abc47c
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_pvc.py
@@ -0,0 +1,167 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-instance-attributes
+class OCPVC(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'pvc'
+
+ # pylint allows 5
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for OCVolume '''
+ super(OCPVC, self).__init__(config.namespace, config.kubeconfig)
+ self.config = config
+ self.namespace = config.namespace
+ self._pvc = None
+
+ @property
+ def pvc(self):
+ ''' property function pvc'''
+ if not self._pvc:
+ self.get()
+ return self._pvc
+
+ @pvc.setter
+ def pvc(self, data):
+ ''' setter function for yedit var '''
+ self._pvc = data
+
+ def bound(self):
+ '''return whether the pvc is bound'''
+ if self.pvc.get_volume_name():
+ return True
+
+ return False
+
+ def exists(self):
+ ''' return whether a pvc exists '''
+ if self.pvc:
+ return True
+
+ return False
+
+ def get(self):
+ '''return pvc information '''
+ result = self._get(self.kind, self.config.name)
+ if result['returncode'] == 0:
+ self.pvc = PersistentVolumeClaim(content=result['results'][0])
+ elif '\"%s\" not found' % self.config.name in result['stderr']:
+ result['returncode'] = 0
+ result['results'] = [{}]
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create the object'''
+ return self._create_from_content(self.config.name, self.config.data)
+
+ def update(self):
+ '''update the object'''
+ # need to update the tls information and the service name
+ return self._replace_content(self.kind, self.config.name, self.config.data)
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ if self.pvc.get_volume_name() or self.pvc.is_bound():
+ return False
+
+ skip = []
+ return not Utils.check_def_equal(self.config.data, self.pvc.yaml_dict, skip_keys=skip, debug=True)
+
+ # pylint: disable=too-many-branches,too-many-return-statements
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the idempotent ansible code'''
+ pconfig = PersistentVolumeClaimConfig(params['name'],
+ params['namespace'],
+ params['kubeconfig'],
+ params['access_modes'],
+ params['volume_capacity'],
+ )
+ oc_pvc = OCPVC(pconfig, verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_pvc.get()
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval['results'], 'state': state}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_pvc.exists():
+
+ if check_mode:
+ return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+ api_rval = oc_pvc.delete()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'state': state}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_pvc.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+ # Create it here
+ api_rval = oc_pvc.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_pvc.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if oc_pvc.pvc.is_bound() or oc_pvc.pvc.get_volume_name():
+ api_rval['msg'] = '##### - This volume is currently bound. Will not update - ####'
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ if oc_pvc.needs_update():
+ api_rval = oc_pvc.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_pvc.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
diff --git a/roles/lib_openshift/src/class/oc_user.py b/roles/lib_openshift/src/class/oc_user.py
new file mode 100644
index 000000000..d9e4eac13
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_user.py
@@ -0,0 +1,227 @@
+# pylint: skip-file
+# flake8: noqa
+
+# pylint: disable=too-many-instance-attributes
+class OCUser(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'users'
+
+ def __init__(self,
+ config,
+ groups=None,
+ verbose=False):
+ ''' Constructor for OCUser '''
+ # namespace has no meaning for user operations, hardcode to 'default'
+ super(OCUser, self).__init__('default', config.kubeconfig)
+ self.config = config
+ self.groups = groups
+ self._user = None
+
+ @property
+ def user(self):
+ ''' property function user'''
+ if not self._user:
+ self.get()
+ return self._user
+
+ @user.setter
+ def user(self, data):
+ ''' setter function for user '''
+ self._user = data
+
+ def exists(self):
+ ''' return whether a user exists '''
+ if self.user:
+ return True
+
+ return False
+
+ def get(self):
+ ''' return user information '''
+ result = self._get(self.kind, self.config.username)
+ if result['returncode'] == 0:
+ self.user = User(content=result['results'][0])
+ elif 'users \"%s\" not found' % self.config.username in result['stderr']:
+ result['returncode'] = 0
+ result['results'] = [{}]
+
+ return result
+
+ def delete(self):
+ ''' delete the object '''
+ return self._delete(self.kind, self.config.username)
+
+ def create_group_entries(self):
+ ''' make entries for user to the provided group list '''
+ if self.groups != None:
+ for group in self.groups:
+ cmd = ['groups', 'add-users', group, self.config.username]
+ rval = self.openshift_cmd(cmd, oadm=True)
+ if rval['returncode'] != 0:
+ return rval
+
+ return rval
+
+ return {'returncode': 0}
+
+ def create(self):
+ ''' create the object '''
+ rval = self.create_group_entries()
+ if rval['returncode'] != 0:
+ return rval
+
+ return self._create_from_content(self.config.username, self.config.data)
+
+ def group_update(self):
+ ''' update group membership '''
+ rval = {'returncode': 0}
+ cmd = ['get', 'groups', '-o', 'json']
+ all_groups = self.openshift_cmd(cmd, output=True)
+
+ # pylint misindentifying all_groups['results']['items'] type
+ # pylint: disable=invalid-sequence-index
+ for group in all_groups['results']['items']:
+ # If we're supposed to be in this group
+ if group['metadata']['name'] in self.groups \
+ and (group['users'] is None or self.config.username not in group['users']):
+ cmd = ['groups', 'add-users', group['metadata']['name'],
+ self.config.username]
+ rval = self.openshift_cmd(cmd, oadm=True)
+ if rval['returncode'] != 0:
+ return rval
+ # else if we're in the group, but aren't supposed to be
+ elif group['users'] != None and self.config.username in group['users'] \
+ and group['metadata']['name'] not in self.groups:
+ cmd = ['groups', 'remove-users', group['metadata']['name'],
+ self.config.username]
+ rval = self.openshift_cmd(cmd, oadm=True)
+ if rval['returncode'] != 0:
+ return rval
+
+ return rval
+
+ def update(self):
+ ''' update the object '''
+ rval = self.group_update()
+ if rval['returncode'] != 0:
+ return rval
+
+ # need to update the user's info
+ return self._replace_content(self.kind, self.config.username, self.config.data, force=True)
+
+ def needs_group_update(self):
+ ''' check if there are group membership changes '''
+ cmd = ['get', 'groups', '-o', 'json']
+ all_groups = self.openshift_cmd(cmd, output=True)
+
+ # pylint misindentifying all_groups['results']['items'] type
+ # pylint: disable=invalid-sequence-index
+ for group in all_groups['results']['items']:
+ # If we're supposed to be in this group
+ if group['metadata']['name'] in self.groups \
+ and (group['users'] is None or self.config.username not in group['users']):
+ return True
+ # else if we're in the group, but aren't supposed to be
+ elif group['users'] != None and self.config.username in group['users'] \
+ and group['metadata']['name'] not in self.groups:
+ return True
+
+ return False
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ skip = []
+ if self.needs_group_update():
+ return True
+
+ return not Utils.check_def_equal(self.config.data, self.user.yaml_dict, skip_keys=skip, debug=True)
+
+ # pylint: disable=too-many-return-statements
+ @staticmethod
+ def run_ansible(params, check_mode=False):
+ ''' run the idempotent ansible code
+
+ params comes from the ansible portion of this module
+ check_mode: does the module support check mode. (module.check_mode)
+ '''
+
+ uconfig = UserConfig(params['kubeconfig'],
+ params['username'],
+ params['full_name'],
+ )
+
+ oc_user = OCUser(uconfig, params['groups'],
+ verbose=params['debug'])
+ state = params['state']
+
+ api_rval = oc_user.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval['results'], 'state': "list"}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_user.exists():
+
+ if check_mode:
+ return {'changed': False, 'msg': 'Would have performed a delete.'}
+
+ api_rval = oc_user.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': "absent"}
+ return {'changed': False, 'state': "absent"}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_user.exists():
+
+ if check_mode:
+ return {'changed': False, 'msg': 'Would have performed a create.'}
+
+ # Create it here
+ api_rval = oc_user.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_user.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': "present"}
+
+ ########
+ # Update
+ ########
+ if oc_user.needs_update():
+ api_rval = oc_user.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ orig_cmd = api_rval['cmd']
+ # return the created object
+ api_rval = oc_user.get()
+ # overwrite the get/list cmd
+ api_rval['cmd'] = orig_cmd
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': "present"}
+
+ return {'changed': False, 'results': api_rval, 'state': "present"}
+
+ return {'failed': True,
+ 'changed': False,
+ 'results': 'Unknown state passed. %s' % state,
+ 'state': "unknown"}
diff --git a/roles/lib_openshift/src/class/oc_volume.py b/roles/lib_openshift/src/class/oc_volume.py
new file mode 100644
index 000000000..45b58a516
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_volume.py
@@ -0,0 +1,195 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-instance-attributes
+class OCVolume(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",
+ "dc": "spec.template.spec.containers[0].volumeMounts",
+ "rc": "spec.template.spec.containers[0].volumeMounts",
+ }
+ volumes_path = {"pod": "spec.volumes",
+ "dc": "spec.template.spec.volumes",
+ "rc": "spec.template.spec.volumes",
+ }
+
+ # pylint allows 5
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ kind,
+ resource_name,
+ namespace,
+ vol_name,
+ mount_path,
+ mount_type,
+ secret_name,
+ claim_size,
+ claim_name,
+ configmap_name,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False):
+ ''' Constructor for OCVolume '''
+ super(OCVolume, self).__init__(namespace, kubeconfig)
+ self.kind = kind
+ self.volume_info = {'name': vol_name,
+ 'secret_name': secret_name,
+ 'path': mount_path,
+ 'type': mount_type,
+ 'claimSize': claim_size,
+ 'claimName': claim_name,
+ 'configmap_name': configmap_name}
+ self.volume, self.volume_mount = Volume.create_volume_structure(self.volume_info)
+ self.name = resource_name
+ self.namespace = namespace
+ self.kubeconfig = kubeconfig
+ self.verbose = verbose
+ self._resource = None
+
+ @property
+ def resource(self):
+ ''' property function for resource var '''
+ if not self._resource:
+ self.get()
+ return self._resource
+
+ @resource.setter
+ def resource(self, data):
+ ''' setter function for resource var '''
+ self._resource = data
+
+ def exists(self):
+ ''' return whether a volume exists '''
+ volume_mount_found = False
+ volume_found = self.resource.exists_volume(self.volume)
+ if not self.volume_mount and volume_found:
+ return True
+
+ if self.volume_mount:
+ volume_mount_found = self.resource.exists_volume_mount(self.volume_mount)
+
+ if volume_found and self.volume_mount and volume_mount_found:
+ return True
+
+ return False
+
+ def get(self):
+ '''return volume information '''
+ vol = self._get(self.kind, self.name)
+ if vol['returncode'] == 0:
+ if self.kind == 'dc':
+ self.resource = DeploymentConfig(content=vol['results'][0])
+ vol['results'] = self.resource.get_volumes()
+
+ return vol
+
+ def delete(self):
+ '''remove a volume'''
+ self.resource.delete_volume_by_name(self.volume)
+ return self._replace_content(self.kind, self.name, self.resource.yaml_dict)
+
+ def put(self):
+ '''place volume into dc '''
+ self.resource.update_volume(self.volume)
+ self.resource.get_volumes()
+ self.resource.update_volume_mount(self.volume_mount)
+ return self._replace_content(self.kind, self.name, self.resource.yaml_dict)
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ return self.resource.needs_update_volume(self.volume, self.volume_mount)
+
+ # pylint: disable=too-many-branches,too-many-return-statements
+ @staticmethod
+ def run_ansible(params, check_mode=False):
+ '''run the idempotent ansible code'''
+ oc_volume = OCVolume(params['kind'],
+ params['name'],
+ params['namespace'],
+ params['vol_name'],
+ params['mount_path'],
+ params['mount_type'],
+ # secrets
+ params['secret_name'],
+ # pvc
+ params['claim_size'],
+ params['claim_name'],
+ # configmap
+ params['configmap_name'],
+ kubeconfig=params['kubeconfig'],
+ verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_volume.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval['results'], 'state': state}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_volume.exists():
+
+ if check_mode:
+ return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+ api_rval = oc_volume.delete()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'state': state}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_volume.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+ # Create it here
+ api_rval = oc_volume.put()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_volume.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if oc_volume.needs_update():
+ api_rval = oc_volume.put()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_volume.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, state: state}
+
+ return {'changed': False, 'results': api_rval, state: state}
+
+ return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
diff --git a/roles/lib_openshift/src/doc/ca_server_cert b/roles/lib_openshift/src/doc/ca_server_cert
index ff9229281..7f2be4ada 100644
--- a/roles/lib_openshift/src/doc/ca_server_cert
+++ b/roles/lib_openshift/src/doc/ca_server_cert
@@ -79,6 +79,12 @@ options:
required: false
default: True
aliases: []
+ expire_days:
+ description
+ - Validity of the certificate in days
+ required: false
+ default: None
+ aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
diff --git a/roles/lib_openshift/src/doc/clusterrole b/roles/lib_openshift/src/doc/clusterrole
new file mode 100644
index 000000000..3d14a2dfb
--- /dev/null
+++ b/roles/lib_openshift/src/doc/clusterrole
@@ -0,0 +1,66 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_clusterrole
+short_description: Modify, and idempotently manage openshift clusterroles
+description:
+ - Manage openshift clusterroles
+options:
+ state:
+ description:
+ - Supported states, present, absent, list
+ - present - will ensure object is created or updated to the value specified
+ - list - will return a clusterrole
+ - absent - will remove a clusterrole
+ required: False
+ default: present
+ choices: ["present", 'absent', 'list']
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ rules:
+ description:
+ - A list of dictionaries that have the rule parameters.
+ - e.g. rules=[{'apiGroups': [""], 'attributeRestrictions': None, 'verbs': ['get'], 'resources': []}]
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: query a list of env vars on dc
+ oc_clusterrole:
+ name: myclusterrole
+ state: list
+
+- name: Set the following variables.
+ oc_clusterrole:
+ name: myclusterrole
+ rules:
+ apiGroups:
+ - ""
+ attributeRestrictions: null
+ verbs: []
+ resources: []
+'''
diff --git a/roles/lib_openshift/src/doc/configmap b/roles/lib_openshift/src/doc/configmap
new file mode 100644
index 000000000..5ca8292c4
--- /dev/null
+++ b/roles/lib_openshift/src/doc/configmap
@@ -0,0 +1,72 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_configmap
+short_description: Modify, and idempotently manage openshift configmaps
+description:
+ - Modify openshift configmaps programmatically.
+options:
+ state:
+ description:
+ - Supported states, present, absent, list
+ - present - will ensure object is created or updated to the value specified
+ - list - will return a configmap
+ - absent - will remove the configmap
+ required: False
+ default: present
+ choices: ["present", 'absent', 'list']
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: True
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+ default: default
+ aliases: []
+ from_file:
+ description:
+ - A dict of key, value pairs representing the configmap key and the value represents the file path.
+ required: false
+ default: None
+ aliases: []
+ from_literal:
+ description:
+ - A dict of key, value pairs representing the configmap key and the value represents the string content
+ required: false
+ default: None
+ aliases: []
+author:
+- "kenny woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: create group
+ oc_configmap:
+ state: present
+ name: testmap
+ from_file:
+ secret: /path/to/secret
+ from_literal:
+ title: systemadmin
+ register: configout
+'''
diff --git a/roles/lib_openshift/src/doc/group b/roles/lib_openshift/src/doc/group
new file mode 100644
index 000000000..c5ba6ebd9
--- /dev/null
+++ b/roles/lib_openshift/src/doc/group
@@ -0,0 +1,56 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_group
+short_description: Modify, and idempotently manage openshift groups.
+description:
+ - Modify openshift groups programmatically.
+options:
+ state:
+ description:
+ - Supported states, present, absent, list
+ - present - will ensure object is created or updated to the value specified
+ - list - will return a group
+ - absent - will remove the group
+ required: False
+ default: present
+ choices: ["present", 'absent', 'list']
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+ default: str
+ aliases: []
+author:
+- "Joel Diaz <jdiaz@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: create group
+ oc_group:
+ state: present
+ name: acme_org
+ register: group_out
+'''
diff --git a/roles/lib_openshift/src/doc/image b/roles/lib_openshift/src/doc/image
new file mode 100644
index 000000000..18cf4e168
--- /dev/null
+++ b/roles/lib_openshift/src/doc/image
@@ -0,0 +1,75 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_image
+short_description: Create, modify, and idempotently manage openshift labels.
+description:
+ - Modify openshift labels programmatically.
+options:
+ state:
+ description:
+ - State controls the action that will be taken with resource
+ - 'present' will create. Does _not_ support update.
+ - 'list' will read the labels
+ default: present
+ choices: ["present", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ namespace:
+ description:
+ - The namespace where this object lives
+ required: false
+ default: default
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ registry_url:
+ description:
+ - The url for the registry so that openshift can pull the image
+ required: false
+ default: None
+ aliases: []
+ image_name:
+ description:
+ - The name of the image being imported
+ required: false
+ default: False
+ aliases: []
+ image_tag:
+ description:
+ - The tag of the image being imported
+ required: false
+ default: None
+ aliases: []
+author:
+- "Ivan Horvath<ihorvath@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: Get an imagestream
+ oc_image:
+ name: php55
+ state: list
+ register: imageout
+
+- name: create an imagestream
+ oc_image:
+ state: present
+ image_name: php55
+ image_tag: int
+ registry_url: registry.example.com
+ namespace: default
+ register: imageout
+'''
diff --git a/roles/lib_openshift/src/doc/manage_node b/roles/lib_openshift/src/doc/manage_node
index 382377f3e..b651ea4e7 100644
--- a/roles/lib_openshift/src/doc/manage_node
+++ b/roles/lib_openshift/src/doc/manage_node
@@ -3,7 +3,7 @@
DOCUMENTATION = '''
---
-module: oadm_manage_node
+module: oc_adm_manage_node
short_description: Module to manage openshift nodes
description:
- Manage openshift nodes programmatically.
@@ -75,13 +75,13 @@ extends_documentation_fragment: []
EXAMPLES = '''
- name: oadm manage-node --schedulable=true --selector=ops_node=new
- oadm_manage_node:
+ oc_adm_manage_node:
selector: ops_node=new
schedulable: True
register: schedout
- name: oadm manage-node my-k8s-node-5 --evacuate
- oadm_manage_node:
+ oc_adm_manage_node:
node: my-k8s-node-5
evacuate: True
force: True
diff --git a/roles/lib_openshift/src/doc/pvc b/roles/lib_openshift/src/doc/pvc
new file mode 100644
index 000000000..9240f2a0f
--- /dev/null
+++ b/roles/lib_openshift/src/doc/pvc
@@ -0,0 +1,76 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_pvc
+short_description: Modify, and idempotently manage openshift persistent volume claims
+description:
+ - Modify openshift persistent volume claims programmatically.
+options:
+ state:
+ description:
+ - Supported states, present, absent, list
+ - present - will ensure object is created or updated to the value specified
+ - list - will return a pvc
+ - absent - will remove a pvc
+ required: False
+ default: present
+ choices: ["present", 'absent', 'list']
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+ default: str
+ aliases: []
+ volume_capacity:
+ description:
+ - The requested volume capacity
+ required: False
+ default: 1G
+ aliases: []
+ access_modes:
+ description:
+ - The access modes allowed for the pvc
+ - Expects a list
+ required: False
+ default: ReadWriteOnce
+ choices:
+ - ReadWriteOnce
+ - ReadOnlyMany
+ - ReadWriteMany
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: create a pvc
+ oc_pvc:
+ namespace: awesomeapp
+ name: dbstorage
+ access_modes:
+ - ReadWriteOnce
+ volume_capacity: 5G
+ register: pvcout
+'''
diff --git a/roles/lib_openshift/src/doc/user b/roles/lib_openshift/src/doc/user
new file mode 100644
index 000000000..65ee01eb7
--- /dev/null
+++ b/roles/lib_openshift/src/doc/user
@@ -0,0 +1,128 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_user
+short_description: Create, modify, and idempotently manage openshift users.
+description:
+ - Modify openshift users programmatically.
+options:
+ state:
+ description:
+ - State controls the action that will be taken with resource
+ - 'present' will create or update a user to the desired state
+ - 'absent' will ensure user is removed
+ - 'list' will read and return a list of users
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ username:
+ description:
+ - Short username to query/modify.
+ required: false
+ default: None
+ aliases: []
+ full_name:
+ description:
+ - String with the full name/description of the user.
+ required: false
+ default: None
+ aliases: []
+ groups:
+ description:
+ - List of groups the user should be a member of. This does not add/update the legacy 'groups' field in the OpenShift user object, but makes user entries into the appropriate OpenShift group object for the given user.
+ required: false
+ default: []
+ aliases: []
+author:
+- "Joel Diaz <jdiaz@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: Ensure user exists
+ oc_user:
+ state: present
+ username: johndoe
+ full_name "John Doe"
+ groups:
+ - dedicated-admins
+ register: user_johndoe
+
+user_johndoe variable will have contents like:
+ok: [ded-int-aws-master-61034] => {
+ "user_johndoe": {
+ "changed": true,
+ "results": {
+ "cmd": "oc -n default get users johndoe -o json",
+ "results": [
+ {
+ "apiVersion": "v1",
+ "fullName": "John DOe",
+ "groups": null,
+ "identities": null,
+ "kind": "User",
+ "metadata": {
+ "creationTimestamp": "2017-02-28T15:09:21Z",
+ "name": "johndoe",
+ "resourceVersion": "848781",
+ "selfLink": "/oapi/v1/users/johndoe",
+ "uid": "e23d3300-fdc7-11e6-9e3e-12822d6b7656"
+ }
+ }
+ ],
+ "returncode": 0
+ },
+ "state": "present"
+ }
+}
+'groups' is empty because this field is the OpenShift user object's 'group' field.
+
+- name: Ensure user does not exist
+ oc_user:
+ state: absent
+ username: johndoe
+
+- name: List user's info
+ oc_user:
+ state: list
+ username: johndoe
+ register: user_johndoe
+
+user_johndoe will have contents similar to:
+ok: [ded-int-aws-master-61034] => {
+ "user_johndoe": {
+ "changed": false,
+ "results": [
+ {
+ "apiVersion": "v1",
+ "fullName": "John Doe",
+ "groups": null,
+ "identities": null,
+ "kind": "User",
+ "metadata": {
+ "creationTimestamp": "2017-02-28T15:04:44Z",
+ "name": "johndoe",
+ "resourceVersion": "848280",
+ "selfLink": "/oapi/v1/users/johndoe",
+ "uid": "3d479ad2-fdc7-11e6-9e3e-12822d6b7656"
+ }
+ }
+ ],
+ "state": "list"
+ }
+}
+'''
diff --git a/roles/lib_openshift/src/doc/volume b/roles/lib_openshift/src/doc/volume
new file mode 100644
index 000000000..1d04afeef
--- /dev/null
+++ b/roles/lib_openshift/src/doc/volume
@@ -0,0 +1,105 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_volume
+short_description: Create, modify, and idempotently manage openshift volumes.
+description:
+ - Modify openshift volumes programmatically.
+options:
+ state:
+ description:
+ - State controls the action that will be taken with resource
+ - 'present' will create or update and object to the desired state
+ - 'absent' will ensure volumes are removed
+ - 'list' will read the volumes
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ namespace:
+ description:
+ - The name of the namespace where the object lives
+ required: false
+ default: default
+ aliases: []
+ kind:
+ description:
+ - The kind of object that can be managed.
+ default: dc
+ choices:
+ - dc
+ - rc
+ - pods
+ aliases: []
+ mount_type:
+ description:
+ - The type of volume to be used
+ required: false
+ default: None
+ choices:
+ - emptydir
+ - hostpath
+ - secret
+ - pvc
+ - configmap
+ aliases: []
+ mount_path:
+ description:
+ - The path to where the mount will be attached
+ required: false
+ default: None
+ aliases: []
+ secret_name:
+ description:
+ - The name of the secret. Used when mount_type is secret.
+ required: false
+ default: None
+ aliases: []
+ claim_size:
+ description:
+ - The size in GB of the pv claim. e.g. 100G
+ required: false
+ default: None
+ aliases: []
+ claim_name:
+ description:
+ - The name of the pv claim
+ required: false
+ default: None
+ aliases: []
+ configmap_name:
+ description:
+ - The name of the configmap
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: attach storage volumes to deploymentconfig
+ oc_volume:
+ namespace: logging
+ kind: dc
+ name: name_of_the_dc
+ mount_type: pvc
+ claim_name: loggingclaim
+ claim_size: 100G
+ vol_name: logging-storage
+ run_once: true
+'''
diff --git a/roles/lib_openshift/src/generate.py b/roles/lib_openshift/src/generate.py
index 3f23455b5..2570f51dd 100755
--- a/roles/lib_openshift/src/generate.py
+++ b/roles/lib_openshift/src/generate.py
@@ -5,12 +5,16 @@
import argparse
import os
+import re
import yaml
import six
OPENSHIFT_ANSIBLE_PATH = os.path.dirname(os.path.realpath(__file__))
OPENSHIFT_ANSIBLE_SOURCES_PATH = os.path.join(OPENSHIFT_ANSIBLE_PATH, 'sources.yml') # noqa: E501
LIBRARY = os.path.join(OPENSHIFT_ANSIBLE_PATH, '..', 'library/')
+SKIP_COVERAGE_PATTERN = [re.compile('class Yedit.*$'),
+ re.compile('class Utils.*$')]
+PRAGMA_STRING = ' # pragma: no cover'
class GenerateAnsibleException(Exception):
@@ -72,6 +76,11 @@ def generate(parts):
if idx in [0, 1] and 'flake8: noqa' in line or 'pylint: skip-file' in line: # noqa: E501
continue
+ for skip in SKIP_COVERAGE_PATTERN:
+ if re.match(skip, line):
+ line = line.strip()
+ line += PRAGMA_STRING + os.linesep
+
data.write(line)
fragment_banner(fpart, "footer", data)
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index 334542b97..1868b1420 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -95,11 +95,15 @@ class OpenShiftCLI(object):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
- def _delete(self, resource, rname, selector=None):
+ def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
- cmd = ['delete', resource, rname]
- if selector:
- cmd.append('--selector=%s' % selector)
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
@@ -117,7 +121,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -134,13 +138,13 @@ class OpenShiftCLI(object):
return self.openshift_cmd(['create', '-f', fname])
- def _get(self, resource, rname=None, selector=None):
+ def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
- if selector:
- cmd.append('--selector=%s' % selector)
- elif rname:
- cmd.append(rname)
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
cmd.extend(['-o', 'json'])
@@ -160,9 +164,9 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
- cmd.append('--schedulable=%s' % schedulable)
+ cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
@@ -177,10 +181,10 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
@@ -193,16 +197,16 @@ class OpenShiftCLI(object):
if node:
cmd.extend(node)
else:
- cmd.append('--selector=%s' % selector)
+ cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
- cmd.append('--pod-selector=%s' % pod_selector)
+ cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
- cmd.append('--grace-period=%s' % int(grace_period))
+ cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
@@ -283,9 +287,9 @@ class OpenShiftCLI(object):
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.args:
- err = err.args
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
diff --git a/roles/lib_openshift/src/lib/clusterrole.py b/roles/lib_openshift/src/lib/clusterrole.py
new file mode 100644
index 000000000..93ffababf
--- /dev/null
+++ b/roles/lib_openshift/src/lib/clusterrole.py
@@ -0,0 +1,68 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-public-methods
+class ClusterRole(Yedit):
+ ''' Class to model an openshift ClusterRole'''
+ rules_path = "rules"
+
+ def __init__(self, name=None, content=None):
+ ''' Constructor for clusterrole '''
+ if content is None:
+ content = ClusterRole.builder(name).yaml_dict
+
+ super(ClusterRole, self).__init__(content=content)
+
+ self.__rules = Rule.parse_rules(self.get(ClusterRole.rules_path)) or []
+
+ @property
+ def rules(self):
+ return self.__rules
+
+ @rules.setter
+ def rules(self, data):
+ self.__rules = data
+ self.put(ClusterRole.rules_path, self.__rules)
+
+ def rule_exists(self, inc_rule):
+ '''attempt to find the inc_rule in the rules list'''
+ for rule in self.rules:
+ if rule == inc_rule:
+ return True
+
+ return False
+
+ def compare(self, other, verbose=False):
+ '''compare function for clusterrole'''
+ for rule in other.rules:
+ if rule not in self.rules:
+ if verbose:
+ print('Rule in other not found in self. [{}]'.format(rule))
+ return False
+
+ for rule in self.rules:
+ if rule not in other.rules:
+ if verbose:
+ print('Rule in self not found in other. [{}]'.format(rule))
+ return False
+
+ return True
+
+ @staticmethod
+ def builder(name='default_clusterrole', rules=None):
+ '''return a clusterrole with name and/or rules'''
+ if rules is None:
+ rules = [{'apiGroups': [""],
+ 'attributeRestrictions': None,
+ 'verbs': [],
+ 'resources': []}]
+ content = {
+ 'apiVersion': 'v1',
+ 'kind': 'ClusterRole',
+ 'metadata': {'name': '{}'.format(name)},
+ 'rules': rules,
+ }
+
+ return ClusterRole(content=content)
+
diff --git a/roles/lib_openshift/src/lib/group.py b/roles/lib_openshift/src/lib/group.py
new file mode 100644
index 000000000..fac5fcbc2
--- /dev/null
+++ b/roles/lib_openshift/src/lib/group.py
@@ -0,0 +1,36 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+class GroupConfig(object):
+ ''' Handle route options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ sname,
+ namespace,
+ kubeconfig):
+ ''' constructor for handling group options '''
+ self.kubeconfig = kubeconfig
+ self.name = sname
+ self.namespace = namespace
+ self.data = {}
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' return a service as a dict '''
+ self.data['apiVersion'] = 'v1'
+ self.data['kind'] = 'Group'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ self.data['users'] = None
+
+
+# pylint: disable=too-many-instance-attributes
+class Group(Yedit):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'group'
+
+ def __init__(self, content):
+ '''Group constructor'''
+ super(Group, self).__init__(content=content)
diff --git a/roles/lib_openshift/src/lib/pvc.py b/roles/lib_openshift/src/lib/pvc.py
new file mode 100644
index 000000000..929b50990
--- /dev/null
+++ b/roles/lib_openshift/src/lib/pvc.py
@@ -0,0 +1,167 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-instance-attributes
+class PersistentVolumeClaimConfig(object):
+ ''' Handle pvc options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ sname,
+ namespace,
+ kubeconfig,
+ access_modes=None,
+ vol_capacity='1G'):
+ ''' constructor for handling pvc options '''
+ self.kubeconfig = kubeconfig
+ self.name = sname
+ self.namespace = namespace
+ self.access_modes = access_modes
+ self.vol_capacity = vol_capacity
+ self.data = {}
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' return a service as a dict '''
+ # version
+ self.data['apiVersion'] = 'v1'
+ # kind
+ self.data['kind'] = 'PersistentVolumeClaim'
+ # metadata
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ # spec
+ self.data['spec'] = {}
+ self.data['spec']['accessModes'] = ['ReadWriteOnce']
+ if self.access_modes:
+ self.data['spec']['accessModes'] = self.access_modes
+
+ # storage capacity
+ self.data['spec']['resources'] = {}
+ self.data['spec']['resources']['requests'] = {}
+ self.data['spec']['resources']['requests']['storage'] = self.vol_capacity
+
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class PersistentVolumeClaim(Yedit):
+ ''' Class to wrap the oc command line tools '''
+ access_modes_path = "spec.accessModes"
+ volume_capacity_path = "spec.requests.storage"
+ volume_name_path = "spec.volumeName"
+ bound_path = "status.phase"
+ kind = 'PersistentVolumeClaim'
+
+ def __init__(self, content):
+ '''RoleBinding constructor'''
+ super(PersistentVolumeClaim, self).__init__(content=content)
+ self._access_modes = None
+ self._volume_capacity = None
+ self._volume_name = None
+
+ @property
+ def volume_name(self):
+ ''' volume_name property '''
+ if self._volume_name is None:
+ self._volume_name = self.get_volume_name()
+ return self._volume_name
+
+ @volume_name.setter
+ def volume_name(self, data):
+ ''' volume_name property setter'''
+ self._volume_name = data
+
+ @property
+ def access_modes(self):
+ ''' access_modes property '''
+ if self._access_modes is None:
+ self._access_modes = self.get_access_modes()
+ if not isinstance(self._access_modes, list):
+ self._access_modes = list(self._access_modes)
+
+ return self._access_modes
+
+ @access_modes.setter
+ def access_modes(self, data):
+ ''' access_modes property setter'''
+ if not isinstance(data, list):
+ data = list(data)
+
+ self._access_modes = data
+
+ @property
+ def volume_capacity(self):
+ ''' volume_capacity property '''
+ if self._volume_capacity is None:
+ self._volume_capacity = self.get_volume_capacity()
+ return self._volume_capacity
+
+ @volume_capacity.setter
+ def volume_capacity(self, data):
+ ''' volume_capacity property setter'''
+ self._volume_capacity = data
+
+ def get_access_modes(self):
+ '''get access_modes'''
+ return self.get(PersistentVolumeClaim.access_modes_path) or []
+
+ def get_volume_capacity(self):
+ '''get volume_capacity'''
+ return self.get(PersistentVolumeClaim.volume_capacity_path) or []
+
+ def get_volume_name(self):
+ '''get volume_name'''
+ return self.get(PersistentVolumeClaim.volume_name_path) or []
+
+ def is_bound(self):
+ '''return whether volume is bound'''
+ return self.get(PersistentVolumeClaim.bound_path) or []
+
+ #### ADD #####
+ def add_access_mode(self, inc_mode):
+ ''' add an access_mode'''
+ if self.access_modes:
+ self.access_modes.append(inc_mode)
+ else:
+ self.put(PersistentVolumeClaim.access_modes_path, [inc_mode])
+
+ return True
+
+ #### /ADD #####
+
+ #### Remove #####
+ def remove_access_mode(self, inc_mode):
+ ''' remove an access_mode'''
+ try:
+ self.access_modes.remove(inc_mode)
+ except ValueError as _:
+ return False
+
+ return True
+
+ #### /REMOVE #####
+
+ #### UPDATE #####
+ def update_access_mode(self, inc_mode):
+ ''' update an access_mode'''
+ try:
+ index = self.access_modes.index(inc_mode)
+ except ValueError as _:
+ return self.add_access_mode(inc_mode)
+
+ self.access_modes[index] = inc_mode
+
+ return True
+
+ #### /UPDATE #####
+
+ #### FIND ####
+ def find_access_mode(self, inc_mode):
+ ''' find a user '''
+ index = None
+ try:
+ index = self.access_modes.index(inc_mode)
+ except ValueError as _:
+ return index
+
+ return index
diff --git a/roles/lib_openshift/src/lib/rule.py b/roles/lib_openshift/src/lib/rule.py
new file mode 100644
index 000000000..4590dcf90
--- /dev/null
+++ b/roles/lib_openshift/src/lib/rule.py
@@ -0,0 +1,144 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+class Rule(object):
+ '''class to represent a clusterrole rule
+
+ Example Rule Object's yaml:
+ - apiGroups:
+ - ""
+ attributeRestrictions: null
+ resources:
+ - persistentvolumes
+ verbs:
+ - create
+ - delete
+ - deletecollection
+ - get
+ - list
+ - patch
+ - update
+ - watch
+
+ '''
+ def __init__(self,
+ api_groups=None,
+ attr_restrictions=None,
+ resources=None,
+ verbs=None):
+ self.__api_groups = api_groups if api_groups is not None else [""]
+ self.__verbs = verbs if verbs is not None else []
+ self.__resources = resources if resources is not None else []
+ self.__attribute_restrictions = attr_restrictions if attr_restrictions is not None else None
+
+ @property
+ def verbs(self):
+ '''property for verbs'''
+ if self.__verbs is None:
+ return []
+
+ return self.__verbs
+
+ @verbs.setter
+ def verbs(self, data):
+ '''setter for verbs'''
+ self.__verbs = data
+
+ @property
+ def api_groups(self):
+ '''property for api_groups'''
+ if self.__api_groups is None:
+ return []
+ return self.__api_groups
+
+ @api_groups.setter
+ def api_groups(self, data):
+ '''setter for api_groups'''
+ self.__api_groups = data
+
+ @property
+ def resources(self):
+ '''property for resources'''
+ if self.__resources is None:
+ return []
+
+ return self.__resources
+
+ @resources.setter
+ def resources(self, data):
+ '''setter for resources'''
+ self.__resources = data
+
+ @property
+ def attribute_restrictions(self):
+ '''property for attribute_restrictions'''
+ return self.__attribute_restrictions
+
+ @attribute_restrictions.setter
+ def attribute_restrictions(self, data):
+ '''setter for attribute_restrictions'''
+ self.__attribute_restrictions = data
+
+ def add_verb(self, inc_verb):
+ '''add a verb to the verbs array'''
+ self.verbs.append(inc_verb)
+
+ def add_api_group(self, inc_apigroup):
+ '''add an api_group to the api_groups array'''
+ self.api_groups.append(inc_apigroup)
+
+ def add_resource(self, inc_resource):
+ '''add an resource to the resources array'''
+ self.resources.append(inc_resource)
+
+ def remove_verb(self, inc_verb):
+ '''add a verb to the verbs array'''
+ try:
+ self.verbs.remove(inc_verb)
+ return True
+ except ValueError:
+ pass
+
+ return False
+
+ def remove_api_group(self, inc_api_group):
+ '''add a verb to the verbs array'''
+ try:
+ self.api_groups.remove(inc_api_group)
+ return True
+ except ValueError:
+ pass
+
+ return False
+
+ def remove_resource(self, inc_resource):
+ '''add a verb to the verbs array'''
+ try:
+ self.resources.remove(inc_resource)
+ return True
+ except ValueError:
+ pass
+
+ return False
+
+ def __eq__(self, other):
+ '''return whether rules are equal'''
+ return (self.attribute_restrictions == other.attribute_restrictions and
+ self.api_groups == other.api_groups and
+ self.resources == other.resources and
+ self.verbs == other.verbs)
+
+
+ @staticmethod
+ def parse_rules(inc_rules):
+ '''create rules from an array'''
+
+ results = []
+ for rule in inc_rules:
+ results.append(Rule(rule['apiGroups'],
+ rule['attributeRestrictions'],
+ rule['resources'],
+ rule['verbs']))
+
+ return results
diff --git a/roles/lib_openshift/src/lib/user.py b/roles/lib_openshift/src/lib/user.py
new file mode 100644
index 000000000..a14d5fc91
--- /dev/null
+++ b/roles/lib_openshift/src/lib/user.py
@@ -0,0 +1,37 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+class UserConfig(object):
+ ''' Handle user options '''
+ def __init__(self,
+ kubeconfig,
+ username,
+ full_name):
+ ''' constructor for handling user options '''
+ self.kubeconfig = kubeconfig
+ self.username = username
+ self.full_name = full_name
+
+ self.data = {}
+ self.create_dict()
+
+ def create_dict(self):
+ ''' return a user as a dict '''
+ self.data['apiVersion'] = 'v1'
+ self.data['fullName'] = self.full_name
+ self.data['groups'] = None
+ self.data['identities'] = None
+ self.data['kind'] = 'User'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.username
+
+
+# pylint: disable=too-many-instance-attributes
+class User(Yedit):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'user'
+
+ def __init__(self, content):
+ '''User constructor'''
+ super(User, self).__init__(content=content)
diff --git a/roles/lib_openshift/src/lib/volume.py b/roles/lib_openshift/src/lib/volume.py
index e0abb1d1b..c049c8b49 100644
--- a/roles/lib_openshift/src/lib/volume.py
+++ b/roles/lib_openshift/src/lib/volume.py
@@ -2,7 +2,7 @@
# flake8: noqa
class Volume(object):
- ''' Class to model an openshift volume object'''
+ ''' Class to represent an openshift volume object'''
volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",
"dc": "spec.template.spec.containers[0].volumeMounts",
"rc": "spec.template.spec.containers[0].volumeMounts",
@@ -34,5 +34,10 @@ class Volume(object):
elif volume_type == 'hostpath':
volume['hostPath'] = {}
volume['hostPath']['path'] = volume_info['path']
+ elif volume_type == 'configmap':
+ volume['configMap'] = {}
+ volume['configMap']['name'] = volume_info['configmap_name']
+ volume_mount = {'mountPath': volume_info['path'],
+ 'name': volume_info['name']}
return (volume, volume_mount)
diff --git a/roles/lib_openshift/src/sources.yml b/roles/lib_openshift/src/sources.yml
index f16b3c8de..9fa2a6c0e 100644
--- a/roles/lib_openshift/src/sources.yml
+++ b/roles/lib_openshift/src/sources.yml
@@ -9,15 +9,15 @@ oc_adm_ca_server_cert.py:
- class/oc_adm_ca_server_cert.py
- ansible/oc_adm_ca_server_cert.py
-oadm_manage_node.py:
+oc_adm_manage_node.py:
- doc/generated
- doc/license
- lib/import.py
- doc/manage_node
- ../../lib_utils/src/class/yedit.py
- lib/base.py
-- class/oadm_manage_node.py
-- ansible/oadm_manage_node.py
+- class/oc_adm_manage_node.py
+- ansible/oc_adm_manage_node.py
oc_adm_policy_user.py:
- doc/generated
@@ -79,6 +79,28 @@ oc_atomic_container.py:
- doc/atomic_container
- ansible/oc_atomic_container.py
+oc_configmap.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/configmap
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- class/oc_configmap.py
+- ansible/oc_configmap.py
+
+oc_clusterrole.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/clusterrole
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- lib/rule.py
+- lib/clusterrole.py
+- class/oc_clusterrole.py
+- ansible/oc_clusterrole.py
+
oc_edit.py:
- doc/generated
- doc/license
@@ -100,6 +122,28 @@ oc_env.py:
- class/oc_env.py
- ansible/oc_env.py
+
+oc_group.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/group
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- lib/group.py
+- class/oc_group.py
+- ansible/oc_group.py
+
+oc_image.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/image
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- class/oc_image.py
+- ansible/oc_image.py
+
oc_label.py:
- doc/generated
- doc/license
@@ -141,6 +185,17 @@ oc_project.py:
- class/oc_project.py
- ansible/oc_project.py
+oc_pvc.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/pvc
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- lib/pvc.py
+- class/oc_pvc.py
+- ansible/oc_pvc.py
+
oc_route.py:
- doc/generated
- doc/license
@@ -208,6 +263,17 @@ oc_service.py:
- class/oc_service.py
- ansible/oc_service.py
+oc_user.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/user
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- lib/user.py
+- class/oc_user.py
+- ansible/oc_user.py
+
oc_version.py:
- doc/generated
- doc/license
@@ -218,6 +284,18 @@ oc_version.py:
- class/oc_version.py
- ansible/oc_version.py
+oc_volume.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/volume
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- lib/deploymentconfig.py
+- lib/volume.py
+- class/oc_volume.py
+- ansible/oc_volume.py
+
oc_objectvalidator.py:
- doc/generated
- doc/license
diff --git a/roles/lib_openshift/src/test/integration/group.yml b/roles/lib_openshift/src/test/integration/group.yml
new file mode 100755
index 000000000..25aa5727b
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/group.yml
@@ -0,0 +1,229 @@
+#!/usr/bin/ansible-playbook
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+
+ vars:
+
+ post_tasks:
+ - name: delete test group (so future tests work)
+ oc_group:
+ state: absent
+ name: jgroup
+
+ - name: delete 2nd test group (so future tests work)
+ oc_group:
+ state: absent
+ name: jgroup2
+
+ - name: delete test user (so future tests work)
+ oc_user:
+ state: absent
+ username: jdiaz@redhat.com
+
+ - name: get group list
+ oc_group:
+ state: list
+ name: jgroup
+ register: group_out
+ #- debug: var=group_out
+ - name: assert group 'jgroup' (test group) does not exist
+ assert:
+ that: group_out['results'][0] == {}
+
+ - name: get group list
+ oc_group:
+ state: list
+ name: jgroup2
+ register: group_out
+ #- debug: var=group_out
+ - name: assert group 'jgroup2' (test group) does not exist
+ assert:
+ that: group_out['results'][0] == {}
+
+ - name: get user list
+ oc_user:
+ state: list
+ username: 'jdiaz@redhat.com'
+ register: group_out
+ #- debug: var=group_out
+ - name: assert user 'jdiaz@redhat.com' (test user) does not exist
+ assert:
+ that: group_out['results'][0] == {}
+
+ - name: create group
+ oc_group:
+ state: present
+ name: jgroup
+ register: group_out
+ #- debug: var=group_out
+ - name: assert creating group marked changed
+ assert:
+ that: group_out['changed'] == True
+
+ - name: list group
+ oc_group:
+ state: list
+ name: jgroup
+ register: group_out
+ #- debug: var=group_out
+ - name: assert group actually created
+ assert:
+ that: group_out['results'][0]['metadata']['name'] == 'jgroup'
+
+ - name: re-add group
+ oc_group:
+ state: present
+ name: jgroup
+ register: group_out
+ #- debug: var=group_out
+ - name: assert re-adding group marked not changed
+ assert:
+ that: group_out['changed'] == False
+
+
+ - name: add user with group membership
+ oc_user:
+ state: present
+ username: jdiaz@redhat.com
+ full_name: Joel Diaz
+ groups:
+ - jgroup
+ register: group_out
+ #- debug: var=group_out
+
+ - name: get group
+ oc_group:
+ state: list
+ name: jgroup
+ register: group_out
+ - name: assert user in group
+ assert:
+ that: group_out['results'][0]['users'][0] == 'jdiaz@redhat.com'
+
+ - name: add 2nd group
+ oc_group:
+ state: present
+ name: jgroup2
+
+ - name: change group membership
+ oc_user:
+ state: present
+ username: jdiaz@redhat.com
+ full_name: Joel Diaz
+ groups:
+ - jgroup2
+ register: group_out
+ - name: assert result changed
+ assert:
+ that: group_out['changed'] == True
+
+ - name: check jgroup user membership
+ oc_group:
+ state: list
+ name: jgroup
+ register: group_out
+ #- debug: var=group_out
+ - name: assert user not present in previous group
+ assert:
+ that: group_out['results'][0]['users'] == []
+
+ - name: check jgroup2 user membership
+ oc_group:
+ state: list
+ name: jgroup2
+ register: group_out
+ #- debug: var=group_out
+ - name: assert user present in new group
+ assert:
+ that: group_out['results'][0]['users'][0] == 'jdiaz@redhat.com'
+
+ - name: multi-group membership
+ oc_user:
+ state: present
+ username: jdiaz@redhat.com
+ full_name: Joel Diaz
+ groups:
+ - jgroup
+ - jgroup2
+ register: group_out
+ - name: assert result changed
+ assert:
+ that: group_out['changed'] == True
+
+ - name: check jgroup user membership
+ oc_group:
+ state: list
+ name: jgroup
+ register: group_out
+ #- debug: var=group_out
+ - name: assert user present in group
+ assert:
+ that: group_out['results'][0]['users'][0] == 'jdiaz@redhat.com'
+
+ - name: check jgroup2 user membership
+ oc_group:
+ state: list
+ name: jgroup2
+ register: group_out
+ #- debug: var=group_out
+ - name: assert user still present in group
+ assert:
+ that: group_out['results'][0]['users'][0] == 'jdiaz@redhat.com'
+
+ - name: user delete (group cleanup)
+ oc_user:
+ state: absent
+ username: jdiaz@redhat.com
+ register: group_out
+
+ - name: get user list for jgroup
+ oc_group:
+ state: list
+ name: jgroup
+ register: group_out
+ #- debug: var=group_out
+ - name: assert that group jgroup has no members
+ assert:
+ that: group_out['results'][0]['users'] == []
+
+ - name: get user list for jgroup2
+ oc_group:
+ state: list
+ name: jgroup2
+ register: group_out
+ #- debug: var=group_out
+ - name: assert that group jgroup2 has no members
+ assert:
+ that: group_out['results'][0]['users'] == []
+
+ - name: user without groups defined
+ oc_user:
+ state: present
+ username: jdiaz@redhat.com
+ full_name: Joel Diaz
+ register: group_out
+ - name: assert result changed
+ assert:
+ that: group_out['changed'] == True
+
+ - name: check jgroup user membership
+ oc_group:
+ state: list
+ name: jgroup
+ register: group_out
+ #- debug: var=group_out
+ - name: assert user not present in group
+ assert:
+ that: group_out['results'][0]['users'] == []
+
+ - name: check jgroup2 user membership
+ oc_group:
+ state: list
+ name: jgroup2
+ register: group_out
+ #- debug: var=group_out
+ - name: assert user not present in group
+ assert:
+ that: group_out['results'][0]['users'] == []
diff --git a/roles/lib_openshift/src/test/integration/oadm_manage_node.yml b/roles/lib_openshift/src/test/integration/oc_adm_manage_node.yml
index 3ee13a409..1ed2ef11b 100755
--- a/roles/lib_openshift/src/test/integration/oadm_manage_node.yml
+++ b/roles/lib_openshift/src/test/integration/oc_adm_manage_node.yml
@@ -1,6 +1,6 @@
#!/usr/bin/ansible-playbook --module-path=../../../library/
#
-# ./oadm_manage_node.yml -e "cli_master_test=$OPENSHIFT_MASTER
+# ./oc_adm_manage_node.yml -e "cli_master_test=$OPENSHIFT_MASTER
---
- hosts: "{{ cli_master_test }}"
gather_facts: no
@@ -17,7 +17,7 @@
node_to_test: "{{ obj_out['results']['results'][0]['items'][0]['metadata']['name'] }}"
- name: list pods from a node
- oadm_manage_node:
+ oc_adm_manage_node:
list_pods: True
node:
- "{{ node_to_test }}"
@@ -29,7 +29,7 @@
msg: Pod data was not returned
- name: set node to unschedulable
- oadm_manage_node:
+ oc_adm_manage_node:
schedulable: False
node:
- "{{ node_to_test }}"
@@ -56,7 +56,7 @@
that: nodeout.results.results[0]['spec']['unschedulable']
- name: set node to schedulable
- oadm_manage_node:
+ oc_adm_manage_node:
schedulable: True
node:
- "{{ node_to_test }}"
diff --git a/roles/lib_openshift/src/test/integration/oc_clusterrole.yml b/roles/lib_openshift/src/test/integration/oc_clusterrole.yml
new file mode 100755
index 000000000..91b143f55
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_clusterrole.yml
@@ -0,0 +1,106 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+## ./oc_configmap.yml -M ../../../library -e "cli_master_test=$OPENSHIFT_MASTER
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+
+ post_tasks:
+ - name: create a test project
+ oc_project:
+ name: test
+ description: for tests only
+
+ ###### create test ###########
+ - name: create a clusterrole
+ oc_clusterrole:
+ state: present
+ name: operations
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ attributeRestrictions: null
+ verbs:
+ - create
+ - delete
+ - deletecollection
+ - get
+ - list
+ - patch
+ - update
+ - watch
+
+ - name: fetch the created clusterrole
+ oc_clusterrole:
+ name: operations
+ state: list
+ register: croleout
+
+ - debug: var=croleout
+
+ - name: assert clusterrole exists
+ assert:
+ that:
+ - croleout.results.results.metadata.name == 'operations'
+ - croleout.results.results.rules[0].resources[0] == 'persistentvolumes'
+ ###### end create test ###########
+
+ ###### update test ###########
+ - name: update a clusterrole
+ oc_clusterrole:
+ state: present
+ name: operations
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ - serviceaccounts
+ - services
+ attributeRestrictions: null
+ verbs:
+ - create
+ - delete
+ - deletecollection
+ - get
+ - list
+ - patch
+ - update
+ - watch
+
+ - name: fetch the created clusterrole
+ oc_clusterrole:
+ name: operations
+ state: list
+ register: croleout
+
+ - debug: var=croleout
+
+ - name: assert clusterrole is updated
+ assert:
+ that:
+ - croleout.results.results.metadata.name == 'operations'
+ - "'persistentvolumes' in croleout.results.results.rules[0].resources"
+ - "'serviceaccounts' in croleout.results.results.rules[0].resources"
+ - "'services' in croleout.results.results.rules[0].resources"
+ ###### end create test ###########
+
+ ###### delete test ###########
+ - name: delete a clusterrole
+ oc_clusterrole:
+ state: absent
+ name: operations
+
+ - name: fetch the clusterrole
+ oc_clusterrole:
+ name: operations
+ state: list
+ register: croleout
+
+ - debug: var=croleout
+
+ - name: assert operations does not exist
+ assert:
+ that: "'\"operations\" not found' in croleout.results.stderr"
diff --git a/roles/lib_openshift/src/test/integration/oc_configmap.yml b/roles/lib_openshift/src/test/integration/oc_configmap.yml
new file mode 100755
index 000000000..c0d200e73
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_configmap.yml
@@ -0,0 +1,95 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+## ./oc_configmap.yml -M ../../../library -e "cli_master_test=$OPENSHIFT_MASTER
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+ vars:
+ filename: /tmp/test_configmap_from_file
+
+ post_tasks:
+ - name: Setup a file with known contents
+ copy:
+ content: This is a file
+ dest: "{{ filename }}"
+
+ - name: create a test project
+ oc_project:
+ name: test
+ description: for tests only
+
+ ###### create test ###########
+ - name: create a configmap
+ oc_configmap:
+ state: present
+ name: configmaptest
+ namespace: test
+ from_file:
+ config: "{{ filename }}"
+ from_literal:
+ foo: bar
+
+ - name: fetch the created configmap
+ oc_configmap:
+ name: configmaptest
+ state: list
+ namespace: test
+ register: cmout
+
+ - debug: var=cmout
+
+ - name: assert configmaptest exists
+ assert:
+ that:
+ - cmout.results.results[0].metadata.name == 'configmaptest'
+ - cmout.results.results[0].data.foo == 'bar'
+ ###### end create test ###########
+
+ ###### update test ###########
+ - name: create a configmap
+ oc_configmap:
+ state: present
+ name: configmaptest
+ namespace: test
+ from_file:
+ config: "{{ filename }}"
+ from_literal:
+ foo: notbar
+ deployment_type: online
+
+ - name: fetch the updated configmap
+ oc_configmap:
+ name: configmaptest
+ state: list
+ namespace: test
+ register: cmout
+
+ - debug: var=cmout
+
+ - name: assert configmaptest exists
+ assert:
+ that:
+ - cmout.results.results[0].metadata.name == 'configmaptest'
+ - cmout.results.results[0].data.deployment_type == 'online'
+ - cmout.results.results[0].data.foo == 'notbar'
+ ###### end update test ###########
+
+ ###### delete test ###########
+ - name: delete a configmap
+ oc_configmap:
+ state: absent
+ name: configmaptest
+ namespace: test
+
+ - name: fetch the updated configmap
+ oc_configmap:
+ name: configmaptest
+ state: list
+ namespace: test
+ register: cmout
+
+ - debug: var=cmout
+
+ - name: assert configmaptest exists
+ assert:
+ that: "'\"configmaptest\" not found' in cmout.results.stderr"
diff --git a/roles/lib_openshift/src/test/integration/oc_obj.yml b/roles/lib_openshift/src/test/integration/oc_obj.yml
new file mode 100755
index 000000000..c22a2f6a9
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_obj.yml
@@ -0,0 +1,207 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+# ./oc_obj.yml -e "cli_master_test=$OPENSHIFT_MASTER
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+ tasks:
+ - name: create test project
+ oc_project:
+ name: test
+ description: all things test
+ node_selector: ""
+
+ # Create Check #
+ - name: create a dc
+ oc_obj:
+ state: present
+ name: mysql
+ namespace: test
+ kind: dc
+ content:
+ path: /tmp/dcout
+ data:
+ apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ labels:
+ name: mysql
+ name: mysql
+ spec:
+ replicas: 1
+ selector: {}
+ strategy:
+ resources: {}
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ name: mysql
+ spec:
+ containers:
+ - env:
+ - name: MYSQL_USER
+ value: mysql
+ - name: MYSQL_PASSWORD
+ value: mysql
+ - name: MYSQL_DATABASE
+ value: mysql
+ - name: MYSQL_ROOT_PASSWORD
+ value: mysql
+ image: openshift/mysql-55-centos7:latest
+ imagePullPolicy: Always
+ name: mysql
+ ports:
+ - containerPort: 3306
+ name: tcp-3306
+ protocol: TCP
+ resources: {}
+ securityContext:
+ capabilities: {}
+ privileged: false
+ terminationMessagePath: /dev/termination-log
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ securityContext: {}
+ terminationGracePeriodSeconds: 31
+ triggers:
+ - type: ConfigChange
+ - imageChangeParams:
+ automatic: true
+ containerNames:
+ - mysql
+ from:
+ kind: ImageStreamTag
+ name: mysql:latest
+ type: ImageChange
+
+ - name: fetch created dc
+ oc_obj:
+ name: mysql
+ kind: dc
+ state: list
+ namespace: test
+ register: dcout
+
+ - debug: var=dcout
+
+ - assert:
+ that:
+ - dcout.results.returncode == 0
+ - dcout.results.results[0].metadata.name == 'mysql'
+ # End Create Check #
+
+
+ # Delete Check #
+ - name: delete created dc
+ oc_obj:
+ name: mysql
+ kind: dc
+ state: absent
+ namespace: test
+ register: dcout
+
+ - name: fetch delete dc
+ oc_obj:
+ name: mysql
+ kind: dc
+ state: list
+ namespace: test
+ register: dcout
+
+ - debug: var=dcout
+
+ - assert:
+ that:
+ - dcout.results.returncode == 0
+ - "'\"mysql\" not found' in dcout.results.stderr"
+ # End Delete Check #
+
+ # Delete selector Check #
+ - name: create a dc
+ oc_obj:
+ state: present
+ name: mysql
+ namespace: test
+ kind: dc
+ content:
+ path: /tmp/dcout
+ data:
+ apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ labels:
+ name: mysql
+ name: mysql
+ spec:
+ replicas: 1
+ selector: {}
+ strategy:
+ resources: {}
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ name: mysql
+ spec:
+ containers:
+ - env:
+ - name: MYSQL_USER
+ value: mysql
+ - name: MYSQL_PASSWORD
+ value: mysql
+ - name: MYSQL_DATABASE
+ value: mysql
+ - name: MYSQL_ROOT_PASSWORD
+ value: mysql
+ image: openshift/mysql-55-centos7:latest
+ imagePullPolicy: Always
+ name: mysql
+ ports:
+ - containerPort: 3306
+ name: tcp-3306
+ protocol: TCP
+ resources: {}
+ securityContext:
+ capabilities: {}
+ privileged: false
+ terminationMessagePath: /dev/termination-log
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ securityContext: {}
+ terminationGracePeriodSeconds: 31
+ triggers:
+ - type: ConfigChange
+ - imageChangeParams:
+ automatic: true
+ containerNames:
+ - mysql
+ from:
+ kind: ImageStreamTag
+ name: mysql:latest
+ type: ImageChange
+
+ - name: delete using selector
+ oc_obj:
+ namespace: test
+ selector: name=mysql
+ kind: dc
+ state: absent
+ register: dcout
+
+ - debug: var=dcout
+
+ - name: get the dc
+ oc_obj:
+ namespace: test
+ selector: name=mysql
+ kind: dc
+ state: list
+ register: dcout
+
+ - debug: var=dcout
+
+ - assert:
+ that:
+ - dcout.results.returncode == 0
+ - dcout.results.results[0]["items"]|length == 0
diff --git a/roles/lib_openshift/src/test/integration/oc_user.yml b/roles/lib_openshift/src/test/integration/oc_user.yml
new file mode 100755
index 000000000..ad1f9d188
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_user.yml
@@ -0,0 +1,240 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+#
+# ./oc_user.yml -e "cli_master_test=$OPENSHIFT_MASTER
+#
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+
+ vars:
+ test_user: testuser@email.com
+ test_user_fullname: "Test User"
+ pre_tasks:
+ - name: ensure needed vars are defined
+ fail:
+ msg: "{{ item }} no defined"
+ when: "{{ item}} is not defined"
+ with_items:
+ - cli_master_test # ansible inventory instance to run playbook against
+
+ tasks:
+ - name: delete test user (so future tests work)
+ oc_user:
+ state: absent
+ username: "{{ test_user }}"
+
+ - name: get user list
+ oc_user:
+ state: list
+ username: "{{ test_user }}"
+ register: user_out
+ - name: "assert test user does not exist"
+ assert:
+ that: user_out['results'][0] == {}
+ msg: "{{ user_out }}"
+
+ - name: get all list
+ oc_user:
+ state: list
+ register: user_out
+ #- debug: var=user_out
+
+ - name: add test user
+ oc_user:
+ state: present
+ username: "{{ test_user }}"
+ full_name: "{{ test_user_fullname }}"
+ register: user_out
+ - name: assert result set to changed
+ assert:
+ that: user_out['changed'] == True
+ msg: "{{ user_out }}"
+
+ - name: check test user actually added
+ oc_user:
+ state: list
+ username: "{{ test_user }}"
+ register: user_out
+ - name: assert user actually added
+ assert:
+ that: user_out['results'][0]['metadata']['name'] == "{{ test_user }}" and
+ user_out['results'][0]['fullName'] == "{{ test_user_fullname }}"
+ msg: "{{ user_out }}"
+
+ - name: re-add test user
+ oc_user:
+ state: present
+ username: "{{ test_user }}"
+ full_name: "{{ test_user_fullname }}"
+ register: user_out
+ - name: assert re-add result set to not changed
+ assert:
+ that: user_out['changed'] == False
+ msg: "{{ user_out }}"
+
+ - name: modify existing user
+ oc_user:
+ state: present
+ username: "{{ test_user }}"
+ full_name: 'Something Different'
+ register: user_out
+ - name: assert modify existing user result set to changed
+ assert:
+ that: user_out['changed'] == True
+ msg: "{{ user_out }}"
+
+ - name: check modify test user
+ oc_user:
+ state: list
+ username: "{{ test_user }}"
+ register: user_out
+ - name: assert modification successful
+ assert:
+ that: user_out['results'][0]['metadata']['name'] == "{{ test_user }}" and
+ user_out['results'][0]['fullName'] == 'Something Different'
+ msg: "{{ user_out }}"
+
+ - name: delete test user
+ oc_user:
+ state: absent
+ username: "{{ test_user }}"
+ register: user_out
+ - name: assert delete marked changed
+ assert:
+ that: user_out['changed'] == True
+ msg: "{{ user_out }}"
+
+ - name: check delete user
+ oc_user:
+ state: list
+ username: "{{ test_user }}"
+ register: user_out
+ - name: assert deletion successful
+ assert:
+ that: user_out['results'][0] == {}
+ msg: "{{ user_out }}"
+
+ - name: re-delete test user
+ oc_user:
+ state: absent
+ username: "{{ test_user }}"
+ register: user_out
+ - name: check re-delete marked not changed
+ assert:
+ that: user_out['changed'] == False
+ msg: "{{ user_out }}"
+
+ - name: delete test group
+ oc_obj:
+ kind: group
+ state: absent
+ name: integration-test-group
+
+ - name: create test group
+ command: oadm groups new integration-test-group
+
+ - name: check group creation
+ oc_obj:
+ kind: group
+ state: list
+ name: integration-test-group
+ register: user_out
+ - name: assert test group created
+ assert:
+ that: user_out['results']['results'][0]['metadata']['name'] == "integration-test-group"
+ msg: "{{ user_out }}"
+
+ - name: create user with group membership
+ oc_user:
+ state: present
+ username: "{{ test_user }}"
+ groups:
+ - "integration-test-group"
+ register: user_out
+ - debug: var=user_out
+ - name: get group user members
+ oc_obj:
+ kind: group
+ state: list
+ name: integration-test-group
+ register: user_out
+ - name: assert user group membership
+ assert:
+ that: "'{{ test_user }}' in user_out['results']['results'][0]['users'][0]"
+ msg: "{{ user_out }}"
+
+ - name: delete second test group
+ oc_obj:
+ kind: group
+ state: absent
+ name: integration-test-group2
+
+ - name: create empty second group
+ command: oadm groups new integration-test-group2
+
+ - name: update user with second group membership
+ oc_user:
+ state: present
+ username: "{{ test_user }}"
+ groups:
+ - "integration-test-group"
+ - "integration-test-group2"
+ register: user_out
+ - name: assert adding more group changed
+ assert:
+ that: user_out['changed'] == True
+
+ - name: get group memberships
+ oc_obj:
+ kind: group
+ state: list
+ name: "{{ item }}"
+ with_items:
+ - integration-test-group
+ - integration-test-group2
+ register: user_out
+ - name: assert user member of above groups
+ assert:
+ that: "'{{ test_user }}' in user_out['results'][0]['results']['results'][0]['users'] and \
+ '{{ test_user }}' in user_out['results'][1]['results']['results'][0]['users']"
+ msg: "{{ user_out }}"
+
+ - name: update user with only one group
+ oc_user:
+ state: present
+ username: "{{ test_user }}"
+ groups:
+ - "integration-test-group2"
+ register: user_out
+ - assert:
+ that: user_out['changed'] == True
+
+ - name: get group memberships
+ oc_obj:
+ kind: group
+ state: list
+ name: "{{ item }}"
+ with_items:
+ - "integration-test-group"
+ - "integration-test-group2"
+ register: user_out
+ - debug: var=user_out
+ - name: assert proper user membership
+ assert:
+ that: "'{{ test_user }}' not in user_out['results'][0]['results']['results'][0]['users'] and \
+ '{{ test_user }}' in user_out['results'][1]['results']['results'][0]['users']"
+
+ - name: clean up test groups
+ oc_obj:
+ kind: group
+ state: absent
+ name: "{{ item }}"
+ with_items:
+ - "integration-test-group"
+ - "integration-test-group2"
+
+ - name: clean up test user
+ oc_user:
+ state: absent
+ username: "{{ test_user }}"
diff --git a/roles/lib_openshift/src/test/unit/test_oadm_manage_node.py b/roles/lib_openshift/src/test/unit/test_oc_adm_manage_node.py
index 27d98b869..312b1ecbb 100755
--- a/roles/lib_openshift/src/test/unit/test_oadm_manage_node.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_adm_manage_node.py
@@ -1,5 +1,5 @@
'''
- Unit tests for oadm_manage_node
+ Unit tests for oc_adm_manage_node
'''
import os
@@ -16,16 +16,16 @@ import mock
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
-from oadm_manage_node import ManageNode, locate_oc_binary # noqa: E402
+from oc_adm_manage_node import ManageNode, locate_oc_binary # noqa: E402
class ManageNodeTest(unittest.TestCase):
'''
- Test class for oadm_manage_node
+ Test class for oc_adm_manage_node
'''
- @mock.patch('oadm_manage_node.Utils.create_tmpfile_copy')
- @mock.patch('oadm_manage_node.ManageNode.openshift_cmd')
+ @mock.patch('oc_adm_manage_node.Utils.create_tmpfile_copy')
+ @mock.patch('oc_adm_manage_node.ManageNode.openshift_cmd')
def test_list_pods(self, mock_openshift_cmd, mock_tmpfile_copy):
''' Testing a get '''
params = {'node': ['ip-172-31-49-140.ec2.internal'],
@@ -107,8 +107,8 @@ class ManageNodeTest(unittest.TestCase):
# returned 2 pods
self.assertTrue(len(results['results']['nodes']['ip-172-31-49-140.ec2.internal']) == 2)
- @mock.patch('oadm_manage_node.Utils.create_tmpfile_copy')
- @mock.patch('oadm_manage_node.ManageNode.openshift_cmd')
+ @mock.patch('oc_adm_manage_node.Utils.create_tmpfile_copy')
+ @mock.patch('oc_adm_manage_node.ManageNode.openshift_cmd')
def test_schedulable_false(self, mock_openshift_cmd, mock_tmpfile_copy):
''' Testing a get '''
params = {'node': ['ip-172-31-49-140.ec2.internal'],
diff --git a/roles/lib_openshift/src/test/unit/test_oc_clusterrole.py b/roles/lib_openshift/src/test/unit/test_oc_clusterrole.py
new file mode 100755
index 000000000..189f16bda
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/test_oc_clusterrole.py
@@ -0,0 +1,115 @@
+'''
+ Unit tests for oc clusterrole
+'''
+
+import copy
+import os
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error,wrong-import-position
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_clusterrole import OCClusterRole # noqa: E402
+
+
+class OCClusterRoleTest(unittest.TestCase):
+ '''
+ Test class for OCClusterRole
+ '''
+
+ # run_ansible input parameters
+ params = {
+ 'state': 'present',
+ 'name': 'operations',
+ 'rules': [
+ {'apiGroups': [''],
+ 'attributeRestrictions': None,
+ 'verbs': ['create', 'delete', 'deletecollection',
+ 'get', 'list', 'patch', 'update', 'watch'],
+ 'resources': ['persistentvolumes']}
+ ],
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False,
+ }
+
+ @mock.patch('oc_clusterrole.locate_oc_binary')
+ @mock.patch('oc_clusterrole.Utils.create_tmpfile_copy')
+ @mock.patch('oc_clusterrole.Utils._write')
+ @mock.patch('oc_clusterrole.OCClusterRole._run')
+ def test_adding_a_clusterrole(self, mock_cmd, mock_write, mock_tmpfile_copy, mock_loc_binary):
+ ''' Testing adding a project '''
+
+ params = copy.deepcopy(OCClusterRoleTest.params)
+
+ clusterrole = '''{
+ "apiVersion": "v1",
+ "kind": "ClusterRole",
+ "metadata": {
+ "creationTimestamp": "2017-03-27T14:19:09Z",
+ "name": "operations",
+ "resourceVersion": "23",
+ "selfLink": "/oapi/v1/clusterrolesoperations",
+ "uid": "57d358fe-12f8-11e7-874a-0ec502977670"
+ },
+ "rules": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "attributeRestrictions": null,
+ "resources": [
+ "persistentvolumes"
+ ],
+ "verbs": [
+ "create",
+ "delete",
+ "deletecollection",
+ "get",
+ "list",
+ "patch",
+ "update",
+ "watch"
+ ]
+ }
+ ]
+ }'''
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ (1, '', 'Error from server: clusterrole "operations" not found'),
+ (1, '', 'Error from server: namespaces "operations" not found'),
+ (0, '', ''), # created
+ (0, clusterrole, ''), # fetch it
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ mock_loc_binary.side_effect = [
+ 'oc',
+ ]
+
+ # Act
+ results = OCClusterRole.run_ansible(params, False)
+
+ # Assert
+ self.assertTrue(results['changed'])
+ self.assertEqual(results['results']['returncode'], 0)
+ self.assertEqual(results['results']['results']['metadata']['name'], 'operations')
+ self.assertEqual(results['state'], 'present')
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'clusterrole', 'operations', '-o', 'json'], None),
+ mock.call(['oc', 'get', 'clusterrole', 'operations', '-o', 'json'], None),
+ mock.call(['oc', 'create', '-f', mock.ANY], None),
+ mock.call(['oc', 'get', 'clusterrole', 'operations', '-o', 'json'], None),
+ ])
diff --git a/roles/lib_openshift/src/test/unit/test_oc_configmap.py b/roles/lib_openshift/src/test/unit/test_oc_configmap.py
new file mode 100755
index 000000000..318fd6167
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/test_oc_configmap.py
@@ -0,0 +1,239 @@
+'''
+ Unit tests for oc configmap
+'''
+
+import copy
+import os
+import six
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error,wrong-import-position
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_configmap import OCConfigMap, locate_oc_binary # noqa: E402
+
+
+class OCConfigMapTest(unittest.TestCase):
+ '''
+ Test class for OCConfigMap
+ '''
+ params = {'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'state': 'present',
+ 'debug': False,
+ 'name': 'configmap',
+ 'from_file': {},
+ 'from_literal': {},
+ 'namespace': 'test'}
+
+ @mock.patch('oc_configmap.Utils._write')
+ @mock.patch('oc_configmap.Utils.create_tmpfile_copy')
+ @mock.patch('oc_configmap.OCConfigMap._run')
+ def test_create_configmap(self, mock_run, mock_tmpfile_copy, mock_write):
+ ''' Testing a configmap create '''
+ # TODO
+ return
+ params = copy.deepcopy(OCConfigMapTest.params)
+ params['from_file'] = {'test': '/root/file'}
+ params['from_literal'] = {'foo': 'bar'}
+
+ configmap = '''{
+ "apiVersion": "v1",
+ "data": {
+ "foo": "bar",
+ "test": "this is a file\\n"
+ },
+ "kind": "ConfigMap",
+ "metadata": {
+ "creationTimestamp": "2017-03-20T20:24:35Z",
+ "name": "configmap",
+ "namespace": "test"
+ }
+ }'''
+
+ mock_run.side_effect = [
+ (1, '', 'Error from server (NotFound): configmaps "configmap" not found'),
+ (0, '', ''),
+ (0, configmap, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCConfigMap.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ self.assertEqual(results['results']['results'][0]['metadata']['name'], 'configmap')
+
+ @mock.patch('oc_configmap.Utils._write')
+ @mock.patch('oc_configmap.Utils.create_tmpfile_copy')
+ @mock.patch('oc_configmap.OCConfigMap._run')
+ def test_update_configmap(self, mock_run, mock_tmpfile_copy, mock_write):
+ ''' Testing a configmap create '''
+ params = copy.deepcopy(OCConfigMapTest.params)
+ params['from_file'] = {'test': '/root/file'}
+ params['from_literal'] = {'foo': 'bar', 'deployment_type': 'online'}
+
+ configmap = '''{
+ "apiVersion": "v1",
+ "data": {
+ "foo": "bar",
+ "test": "this is a file\\n"
+ },
+ "kind": "ConfigMap",
+ "metadata": {
+ "creationTimestamp": "2017-03-20T20:24:35Z",
+ "name": "configmap",
+ "namespace": "test"
+
+ }
+ }'''
+
+ mod_configmap = '''{
+ "apiVersion": "v1",
+ "data": {
+ "foo": "bar",
+ "deployment_type": "online",
+ "test": "this is a file\\n"
+ },
+ "kind": "ConfigMap",
+ "metadata": {
+ "creationTimestamp": "2017-03-20T20:24:35Z",
+ "name": "configmap",
+ "namespace": "test"
+
+ }
+ }'''
+
+ mock_run.side_effect = [
+ (0, configmap, ''),
+ (0, mod_configmap, ''),
+ (0, configmap, ''),
+ (0, '', ''),
+ (0, mod_configmap, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCConfigMap.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ self.assertEqual(results['results']['results'][0]['metadata']['name'], 'configmap')
+ self.assertEqual(results['results']['results'][0]['data']['deployment_type'], 'online')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_path_exists.side_effect = lambda _: False
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_shutil_which.side_effect = lambda _f, path=None: None
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
diff --git a/roles/lib_openshift/src/test/unit/test_oc_group.py b/roles/lib_openshift/src/test/unit/test_oc_group.py
new file mode 100755
index 000000000..8eef37810
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/test_oc_group.py
@@ -0,0 +1,253 @@
+'''
+ Unit tests for oc group
+'''
+
+import copy
+import os
+import six
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error,wrong-import-position
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_group import OCGroup, locate_oc_binary # noqa: E402
+
+
+class OCGroupTest(unittest.TestCase):
+ '''
+ Test class for OCGroup
+ '''
+ params = {'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'state': 'present',
+ 'debug': False,
+ 'name': 'acme',
+ 'namespace': 'test'}
+
+ @mock.patch('oc_group.Utils.create_tmpfile_copy')
+ @mock.patch('oc_group.OCGroup._run')
+ def test_create_group(self, mock_run, mock_tmpfile_copy):
+ ''' Testing a group create '''
+ params = copy.deepcopy(OCGroupTest.params)
+
+ group = '''{
+ "kind": "Group",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "acme"
+ },
+ "users": []
+ }'''
+
+ mock_run.side_effect = [
+ (1, '', 'Error from server: groups "acme" not found'),
+ (1, '', 'Error from server: groups "acme" not found'),
+ (0, '', ''),
+ (0, group, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCGroup.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ self.assertEqual(results['results']['results'][0]['metadata']['name'], 'acme')
+
+ @mock.patch('oc_group.Utils.create_tmpfile_copy')
+ @mock.patch('oc_group.OCGroup._run')
+ def test_failed_get_group(self, mock_run, mock_tmpfile_copy):
+ ''' Testing a group create '''
+ params = copy.deepcopy(OCGroupTest.params)
+ params['state'] = 'list'
+ params['name'] = 'noexist'
+
+ mock_run.side_effect = [
+ (1, '', 'Error from server: groups "acme" not found'),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCGroup.run_ansible(params, False)
+
+ self.assertTrue(results['failed'])
+
+ @mock.patch('oc_group.Utils.create_tmpfile_copy')
+ @mock.patch('oc_group.OCGroup._run')
+ def test_delete_group(self, mock_run, mock_tmpfile_copy):
+ ''' Testing a group create '''
+ params = copy.deepcopy(OCGroupTest.params)
+ params['state'] = 'absent'
+
+ group = '''{
+ "kind": "Group",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "acme"
+ },
+ "users": [
+ "user1"
+ ]
+ }'''
+
+ mock_run.side_effect = [
+ (0, group, ''),
+ (0, '', ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCGroup.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+
+ @mock.patch('oc_group.Utils.create_tmpfile_copy')
+ @mock.patch('oc_group.OCGroup._run')
+ def test_get_group(self, mock_run, mock_tmpfile_copy):
+ ''' Testing a group create '''
+ params = copy.deepcopy(OCGroupTest.params)
+ params['state'] = 'list'
+
+ group = '''{
+ "kind": "Group",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "acme"
+ },
+ "users": [
+ "user1"
+ ]
+ }'''
+
+ mock_run.side_effect = [
+ (0, group, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCGroup.run_ansible(params, False)
+
+ self.assertFalse(results['changed'])
+ self.assertEqual(results['results'][0]['metadata']['name'], 'acme')
+ self.assertEqual(results['results'][0]['users'][0], 'user1')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_path_exists.side_effect = lambda _: False
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_shutil_which.side_effect = lambda _f, path=None: None
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
diff --git a/roles/lib_openshift/src/test/unit/test_oc_image.py b/roles/lib_openshift/src/test/unit/test_oc_image.py
new file mode 100755
index 000000000..943c8ca17
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/test_oc_image.py
@@ -0,0 +1,280 @@
+'''
+ Unit tests for oc image
+'''
+import os
+import sys
+import unittest
+import mock
+import six
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_image import OCImage, locate_oc_binary # noqa: E402
+
+
+class OCImageTest(unittest.TestCase):
+ '''
+ Test class for OCImage
+ '''
+
+ @mock.patch('oc_image.Utils.create_tmpfile_copy')
+ @mock.patch('oc_image.OCImage._run')
+ def test_state_list(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing a label list '''
+ params = {'registry_url': 'registry.ops.openshift.com',
+ 'image_name': 'oso-rhel7-zagg-web',
+ 'image_tag': 'int',
+ 'namespace': 'default',
+ 'state': 'list',
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ istream = '''{
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "oso-rhel7-zagg-web",
+ "namespace": "default",
+ "selfLink": "/oapi/v1/namespaces/default/imagestreams/oso-rhel7-zagg-web",
+ "uid": "6ca2b199-dcdb-11e6-8ffd-0a5f8e3e32be",
+ "resourceVersion": "8135944",
+ "generation": 1,
+ "creationTimestamp": "2017-01-17T17:36:05Z",
+ "annotations": {
+ "openshift.io/image.dockerRepositoryCheck": "2017-01-17T17:36:05Z"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "int",
+ "annotations": null,
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.ops.openshift.com/ops/oso-rhel7-zagg-web:int"
+ },
+ "generation": 1,
+ "importPolicy": {}
+ }
+ ]
+ },
+ "status": {
+ "dockerImageRepository": "172.30.183.164:5000/default/oso-rhel7-zagg-web",
+ "tags": [
+ {
+ "tag": "int",
+ "items": [
+ {
+ "created": "2017-01-17T17:36:05Z",
+ "dockerImageReference": "registry.ops.openshift.com/ops/oso-rhel7-zagg-web@sha256:645bab780cf18a9b764d64b02ca65c39d13cb16f19badd0a49a1668629759392",
+ "image": "sha256:645bab780cf18a9b764d64b02ca65c39d13cb16f19badd0a49a1668629759392",
+ "generation": 1
+ }
+ ]
+ }
+ ]
+ }
+ }
+ '''
+
+ mock_cmd.side_effect = [
+ (0, istream, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCImage.run_ansible(params, False)
+
+ self.assertFalse(results['changed'])
+ self.assertEquals(results['results']['results'][0]['metadata']['name'], 'oso-rhel7-zagg-web')
+
+ @mock.patch('oc_image.Utils.create_tmpfile_copy')
+ @mock.patch('oc_image.OCImage._run')
+ def test_state_present(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing a image present '''
+ params = {'registry_url': 'registry.ops.openshift.com',
+ 'image_name': 'oso-rhel7-zagg-web',
+ 'image_tag': 'int',
+ 'namespace': 'default',
+ 'state': 'present',
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ istream = '''{
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "oso-rhel7-zagg-web",
+ "namespace": "default",
+ "selfLink": "/oapi/v1/namespaces/default/imagestreams/oso-rhel7-zagg-web",
+ "uid": "6ca2b199-dcdb-11e6-8ffd-0a5f8e3e32be",
+ "resourceVersion": "8135944",
+ "generation": 1,
+ "creationTimestamp": "2017-01-17T17:36:05Z",
+ "annotations": {
+ "openshift.io/image.dockerRepositoryCheck": "2017-01-17T17:36:05Z"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "int",
+ "annotations": null,
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.ops.openshift.com/ops/oso-rhel7-zagg-web:int"
+ },
+ "generation": 1,
+ "importPolicy": {}
+ }
+ ]
+ },
+ "status": {
+ "dockerImageRepository": "172.30.183.164:5000/default/oso-rhel7-zagg-web",
+ "tags": [
+ {
+ "tag": "int",
+ "items": [
+ {
+ "created": "2017-01-17T17:36:05Z",
+ "dockerImageReference": "registry.ops.openshift.com/ops/oso-rhel7-zagg-web@sha256:645bab780cf18a9b764d64b02ca65c39d13cb16f19badd0a49a1668629759392",
+ "image": "sha256:645bab780cf18a9b764d64b02ca65c39d13cb16f19badd0a49a1668629759392",
+ "generation": 1
+ }
+ ]
+ }
+ ]
+ }
+ }
+ '''
+
+ mock_cmd.side_effect = [
+ (1, '', 'Error from server: imagestreams "oso-rhel7-zagg-web" not found'),
+ (0, '', ''),
+ (0, istream, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCImage.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ self.assertTrue(results['results']['results'][0]['metadata']['name'] == 'oso-rhel7-zagg-web')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_path_exists.side_effect = lambda _: False
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_shutil_which.side_effect = lambda _f, path=None: None
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
diff --git a/roles/lib_openshift/src/test/unit/test_oc_project.py b/roles/lib_openshift/src/test/unit/test_oc_project.py
index 5155101cb..fa454d035 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_project.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_project.py
@@ -2,6 +2,7 @@
Unit tests for oc project
'''
+import copy
import os
import sys
import unittest
@@ -20,9 +21,22 @@ from oc_project import OCProject # noqa: E402
class OCProjectTest(unittest.TestCase):
'''
- Test class for OCSecret
+ Test class for OCProject
'''
+ # run_ansible input parameters
+ params = {
+ 'state': 'present',
+ 'display_name': 'operations project',
+ 'name': 'operations',
+ 'node_selector': ['ops_only=True'],
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False,
+ 'admin': None,
+ 'admin_role': 'admin',
+ 'description': 'All things operations project',
+ }
+
@mock.patch('oc_project.locate_oc_binary')
@mock.patch('oc_project.Utils.create_tmpfile_copy')
@mock.patch('oc_project.Utils._write')
@@ -30,21 +44,9 @@ class OCProjectTest(unittest.TestCase):
def test_adding_a_project(self, mock_cmd, mock_write, mock_tmpfile_copy, mock_loc_oc_bin):
''' Testing adding a project '''
- # Arrange
+ params = copy.deepcopy(OCProjectTest.params)
# run_ansible input parameters
- params = {
- 'state': 'present',
- 'display_name': 'operations project',
- 'name': 'operations',
- 'node_selector': ['ops_only=True'],
- 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
- 'debug': False,
- 'admin': None,
- 'admin_role': 'admin',
- 'description': 'All things operations project',
- }
-
project_results = '''{
"kind": "Project",
"apiVersion": "v1",
@@ -90,7 +92,6 @@ class OCProjectTest(unittest.TestCase):
]
# Act
-
results = OCProject.run_ansible(params, False)
# Assert
@@ -108,3 +109,172 @@ class OCProjectTest(unittest.TestCase):
mock.call(['oc', 'get', 'namespace', 'operations', '-o', 'json'], None),
])
+
+ @mock.patch('oc_project.locate_oc_binary')
+ @mock.patch('oc_project.Utils.create_tmpfile_copy')
+ @mock.patch('oc_project.Utils._write')
+ @mock.patch('oc_project.OCProject._run')
+ def test_modifying_a_project_no_attributes(self, mock_cmd, mock_write, mock_tmpfile_copy, mock_loc_oc_bin):
+ ''' Testing adding a project '''
+ params = copy.deepcopy(self.params)
+ params['display_name'] = None
+ params['node_selector'] = None
+ params['description'] = None
+
+ # run_ansible input parameters
+ project_results = '''{
+ "kind": "Project",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "operations",
+ "selfLink": "/oapi/v1/projects/operations",
+ "uid": "5e52afb8-ee33-11e6-89f4-0edc441d9666",
+ "resourceVersion": "1584",
+ "labels": {},
+ "annotations": {
+ "openshift.io/node-selector": "",
+ "openshift.io/description: "This is a description",
+ "openshift.io/sa.initialized-roles": "true",
+ "openshift.io/sa.scc.mcs": "s0:c3,c2",
+ "openshift.io/sa.scc.supplemental-groups": "1000010000/10000",
+ "openshift.io/sa.scc.uid-range": "1000010000/10000"
+ }
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ }'''
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ (0, project_results, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ mock_loc_oc_bin.side_effect = [
+ 'oc',
+ ]
+
+ # Act
+ results = OCProject.run_ansible(params, False)
+
+ # Assert
+ self.assertFalse(results['changed'])
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'namespace', 'operations', '-o', 'json'], None),
+ ])
+
+ @mock.patch('oc_project.locate_oc_binary')
+ @mock.patch('oc_project.Utils.create_tmpfile_copy')
+ @mock.patch('oc_project.Utils._write')
+ @mock.patch('oc_project.OCProject._run')
+ def test_modifying_project_attributes(self, mock_cmd, mock_write, mock_tmpfile_copy, mock_loc_oc_bin):
+ ''' Testing adding a project '''
+ params = copy.deepcopy(self.params)
+ params['display_name'] = 'updated display name'
+ params['node_selector'] = 'type=infra'
+ params['description'] = 'updated description'
+
+ # run_ansible input parameters
+ project_results = '''{
+ "kind": "Project",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "operations",
+ "selfLink": "/oapi/v1/projects/operations",
+ "uid": "5e52afb8-ee33-11e6-89f4-0edc441d9666",
+ "resourceVersion": "1584",
+ "labels": {},
+ "annotations": {
+ "openshift.io/node-selector": "",
+ "openshift.io/description": "This is a description",
+ "openshift.io/sa.initialized-roles": "true",
+ "openshift.io/sa.scc.mcs": "s0:c3,c2",
+ "openshift.io/sa.scc.supplemental-groups": "1000010000/10000",
+ "openshift.io/sa.scc.uid-range": "1000010000/10000"
+ }
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ }'''
+
+ mod_project_results = '''{
+ "kind": "Project",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "operations",
+ "selfLink": "/oapi/v1/projects/operations",
+ "uid": "5e52afb8-ee33-11e6-89f4-0edc441d9666",
+ "resourceVersion": "1584",
+ "labels": {},
+ "annotations": {
+ "openshift.io/node-selector": "type=infra",
+ "openshift.io/description": "updated description",
+ "openshift.io/display-name": "updated display name",
+ "openshift.io/sa.initialized-roles": "true",
+ "openshift.io/sa.scc.mcs": "s0:c3,c2",
+ "openshift.io/sa.scc.supplemental-groups": "1000010000/10000",
+ "openshift.io/sa.scc.uid-range": "1000010000/10000"
+ }
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ }'''
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ (0, project_results, ''),
+ (0, project_results, ''),
+ (0, '', ''),
+ (0, mod_project_results, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ mock_loc_oc_bin.side_effect = [
+ 'oc',
+ ]
+
+ # Act
+ results = OCProject.run_ansible(params, False)
+
+ # Assert
+ self.assertTrue(results['changed'])
+ self.assertEqual(results['results']['returncode'], 0)
+ self.assertEqual(results['results']['results']['metadata']['annotations']['openshift.io/description'], 'updated description')
+ self.assertEqual(results['state'], 'present')
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'namespace', 'operations', '-o', 'json'], None),
+ mock.call(['oc', 'get', 'namespace', 'operations', '-o', 'json'], None),
+ mock.call(['oc', 'replace', '-f', mock.ANY], None),
+ mock.call(['oc', 'get', 'namespace', 'operations', '-o', 'json'], None),
+ ])
diff --git a/roles/lib_openshift/src/test/unit/test_oc_pvc.py b/roles/lib_openshift/src/test/unit/test_oc_pvc.py
new file mode 100755
index 000000000..82187917d
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/test_oc_pvc.py
@@ -0,0 +1,366 @@
+'''
+ Unit tests for oc pvc
+'''
+
+import copy
+import os
+import six
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error,wrong-import-position
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_pvc import OCPVC, locate_oc_binary # noqa: E402
+
+
+class OCPVCTest(unittest.TestCase):
+ '''
+ Test class for OCPVC
+ '''
+ params = {'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'state': 'present',
+ 'debug': False,
+ 'name': 'mypvc',
+ 'namespace': 'test',
+ 'volume_capacity': '1G',
+ 'access_modes': 'ReadWriteMany'}
+
+ @mock.patch('oc_pvc.Utils.create_tmpfile_copy')
+ @mock.patch('oc_pvc.OCPVC._run')
+ def test_create_pvc(self, mock_run, mock_tmpfile_copy):
+ ''' Testing a pvc create '''
+ params = copy.deepcopy(OCPVCTest.params)
+
+ pvc = '''{"kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mypvc",
+ "namespace": "test",
+ "selfLink": "/api/v1/namespaces/test/persistentvolumeclaims/mypvc",
+ "uid": "77597898-d8d8-11e6-aea5-0e3c0c633889",
+ "resourceVersion": "126510787",
+ "creationTimestamp": "2017-01-12T15:04:50Z",
+ "labels": {
+ "mypvc": "database"
+ },
+ "annotations": {
+ "pv.kubernetes.io/bind-completed": "yes",
+ "pv.kubernetes.io/bound-by-controller": "yes",
+ "v1.2-volume.experimental.kubernetes.io/provisioning-required": "volume.experimental.kubernetes.io/provisioning-completed"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "1Gi"
+ }
+ },
+ "volumeName": "pv-aws-ow5vl"
+ },
+ "status": {
+ "phase": "Bound",
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "capacity": {
+ "storage": "1Gi"
+ }
+ }
+ }'''
+
+ mock_run.side_effect = [
+ (1, '', 'Error from server: persistentvolumeclaims "mypvc" not found'),
+ (1, '', 'Error from server: persistentvolumeclaims "mypvc" not found'),
+ (0, '', ''),
+ (0, pvc, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCPVC.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ self.assertEqual(results['results']['results'][0]['metadata']['name'], 'mypvc')
+
+ @mock.patch('oc_pvc.Utils.create_tmpfile_copy')
+ @mock.patch('oc_pvc.OCPVC._run')
+ def test_update_pvc(self, mock_run, mock_tmpfile_copy):
+ ''' Testing a pvc create '''
+ params = copy.deepcopy(OCPVCTest.params)
+ params['access_modes'] = 'ReadWriteMany'
+
+ pvc = '''{"kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mypvc",
+ "namespace": "test",
+ "selfLink": "/api/v1/namespaces/test/persistentvolumeclaims/mypvc",
+ "uid": "77597898-d8d8-11e6-aea5-0e3c0c633889",
+ "resourceVersion": "126510787",
+ "creationTimestamp": "2017-01-12T15:04:50Z",
+ "labels": {
+ "mypvc": "database"
+ },
+ "annotations": {
+ "pv.kubernetes.io/bind-completed": "yes",
+ "pv.kubernetes.io/bound-by-controller": "yes",
+ "v1.2-volume.experimental.kubernetes.io/provisioning-required": "volume.experimental.kubernetes.io/provisioning-completed"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "1Gi"
+ }
+ },
+ "volumeName": "pv-aws-ow5vl"
+ },
+ "status": {
+ "phase": "Bound",
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "capacity": {
+ "storage": "1Gi"
+ }
+ }
+ }'''
+
+ mod_pvc = '''{"kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mypvc",
+ "namespace": "test",
+ "selfLink": "/api/v1/namespaces/test/persistentvolumeclaims/mypvc",
+ "uid": "77597898-d8d8-11e6-aea5-0e3c0c633889",
+ "resourceVersion": "126510787",
+ "creationTimestamp": "2017-01-12T15:04:50Z",
+ "labels": {
+ "mypvc": "database"
+ },
+ "annotations": {
+ "pv.kubernetes.io/bind-completed": "yes",
+ "pv.kubernetes.io/bound-by-controller": "yes",
+ "v1.2-volume.experimental.kubernetes.io/provisioning-required": "volume.experimental.kubernetes.io/provisioning-completed"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "1Gi"
+ }
+ },
+ "volumeName": "pv-aws-ow5vl"
+ },
+ "status": {
+ "phase": "Bound",
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "capacity": {
+ "storage": "1Gi"
+ }
+ }
+ }'''
+
+ mock_run.side_effect = [
+ (0, pvc, ''),
+ (0, pvc, ''),
+ (0, '', ''),
+ (0, mod_pvc, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCPVC.run_ansible(params, False)
+
+ self.assertFalse(results['changed'])
+ self.assertEqual(results['results']['msg'], '##### - This volume is currently bound. Will not update - ####')
+
+ @mock.patch('oc_pvc.Utils.create_tmpfile_copy')
+ @mock.patch('oc_pvc.OCPVC._run')
+ def test_delete_pvc(self, mock_run, mock_tmpfile_copy):
+ ''' Testing a pvc create '''
+ params = copy.deepcopy(OCPVCTest.params)
+ params['state'] = 'absent'
+
+ pvc = '''{"kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mypvc",
+ "namespace": "test",
+ "selfLink": "/api/v1/namespaces/test/persistentvolumeclaims/mypvc",
+ "uid": "77597898-d8d8-11e6-aea5-0e3c0c633889",
+ "resourceVersion": "126510787",
+ "creationTimestamp": "2017-01-12T15:04:50Z",
+ "labels": {
+ "mypvc": "database"
+ },
+ "annotations": {
+ "pv.kubernetes.io/bind-completed": "yes",
+ "pv.kubernetes.io/bound-by-controller": "yes",
+ "v1.2-volume.experimental.kubernetes.io/provisioning-required": "volume.experimental.kubernetes.io/provisioning-completed"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "1Gi"
+ }
+ },
+ "volumeName": "pv-aws-ow5vl"
+ },
+ "status": {
+ "phase": "Bound",
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "capacity": {
+ "storage": "1Gi"
+ }
+ }
+ }'''
+
+ mock_run.side_effect = [
+ (0, pvc, ''),
+ (0, '', ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCPVC.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_path_exists.side_effect = lambda _: False
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_shutil_which.side_effect = lambda _f, path=None: None
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
diff --git a/roles/lib_openshift/src/test/unit/test_oc_route.py b/roles/lib_openshift/src/test/unit/test_oc_route.py
index 09c52a461..afdb5e4dc 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_route.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_route.py
@@ -21,7 +21,7 @@ from oc_route import OCRoute, locate_oc_binary # noqa: E402
class OCRouteTest(unittest.TestCase):
'''
- Test class for OCServiceAccount
+ Test class for OCRoute
'''
@mock.patch('oc_route.locate_oc_binary')
diff --git a/roles/lib_openshift/src/test/unit/test_oc_user.py b/roles/lib_openshift/src/test/unit/test_oc_user.py
new file mode 100755
index 000000000..f7a17cc2c
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/test_oc_user.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for oc user
+'''
+# To run
+# ./oc_user.py
+#
+# ..
+# ----------------------------------------------------------------------
+# Ran 2 tests in 0.003s
+#
+# OK
+
+import os
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_user import OCUser # noqa: E402
+
+
+class OCUserTest(unittest.TestCase):
+ '''
+ Test class for OCUser
+ '''
+
+ def setUp(self):
+ ''' setup method will create a file and set to known configuration '''
+ pass
+
+ @mock.patch('oc_user.Utils.create_tmpfile_copy')
+ @mock.patch('oc_user.OCUser._run')
+ def test_state_list(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing a user list '''
+ params = {'username': 'testuser@email.com',
+ 'state': 'list',
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'full_name': None,
+ 'groups': [],
+ 'debug': False}
+
+ user = '''{
+ "kind": "User",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "testuser@email.com",
+ "selfLink": "/oapi/v1/users/testuser@email.com",
+ "uid": "02fee6c9-f20d-11e6-b83b-12e1a7285e80",
+ "resourceVersion": "38566887",
+ "creationTimestamp": "2017-02-13T16:53:58Z"
+ },
+ "fullName": "Test User",
+ "identities": null,
+ "groups": null
+ }'''
+
+ mock_cmd.side_effect = [
+ (0, user, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCUser.run_ansible(params, False)
+
+ self.assertFalse(results['changed'])
+ self.assertTrue(results['results'][0]['metadata']['name'] == "testuser@email.com")
+
+ @mock.patch('oc_user.Utils.create_tmpfile_copy')
+ @mock.patch('oc_user.OCUser._run')
+ def test_state_present(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing a user list '''
+ params = {'username': 'testuser@email.com',
+ 'state': 'present',
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'full_name': 'Test User',
+ 'groups': [],
+ 'debug': False}
+
+ created_user = '''{
+ "kind": "User",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "testuser@email.com",
+ "selfLink": "/oapi/v1/users/testuser@email.com",
+ "uid": "8d508039-f224-11e6-b83b-12e1a7285e80",
+ "resourceVersion": "38646241",
+ "creationTimestamp": "2017-02-13T19:42:28Z"
+ },
+ "fullName": "Test User",
+ "identities": null,
+ "groups": null
+ }'''
+
+ mock_cmd.side_effect = [
+ (1, '', 'Error from server: users "testuser@email.com" not found'), # get
+ (1, '', 'Error from server: users "testuser@email.com" not found'), # get
+ (0, 'user "testuser@email.com" created', ''), # create
+ (0, created_user, ''), # get
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCUser.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ self.assertTrue(results['results']['results'][0]['metadata']['name'] ==
+ "testuser@email.com")
+
+ def tearDown(self):
+ '''TearDown method'''
+ pass
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_volume.py b/roles/lib_openshift/src/test/unit/test_oc_volume.py
new file mode 100755
index 000000000..d91e22bc7
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/test_oc_volume.py
@@ -0,0 +1,633 @@
+'''
+ Unit tests for oc volume
+'''
+
+import copy
+import os
+import six
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_volume import OCVolume, locate_oc_binary # noqa: E402
+
+
+class OCVolumeTest(unittest.TestCase):
+ '''
+ Test class for OCVolume
+ '''
+ params = {'name': 'oso-rhel7-zagg-web',
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'namespace': 'test',
+ 'labels': None,
+ 'state': 'present',
+ 'kind': 'dc',
+ 'mount_path': None,
+ 'secret_name': None,
+ 'mount_type': 'pvc',
+ 'claim_name': 'testclaim',
+ 'claim_size': '1G',
+ 'configmap_name': None,
+ 'vol_name': 'test-volume',
+ 'debug': False}
+
+ @mock.patch('oc_volume.Utils.create_tmpfile_copy')
+ @mock.patch('oc_volume.OCVolume._run')
+ def test_create_pvc(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing a label list '''
+ params = copy.deepcopy(OCVolumeTest.params)
+
+ dc = '''{
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "oso-rhel7-zagg-web",
+ "namespace": "new-monitoring",
+ "selfLink": "/oapi/v1/namespaces/new-monitoring/deploymentconfigs/oso-rhel7-zagg-web",
+ "uid": "f56e9dd2-7c13-11e6-b046-0e8844de0587",
+ "resourceVersion": "137095771",
+ "generation": 4,
+ "creationTimestamp": "2016-09-16T13:46:24Z",
+ "labels": {
+ "app": "oso-rhel7-ops-base",
+ "name": "oso-rhel7-zagg-web"
+ },
+ "annotations": {
+ "openshift.io/generated-by": "OpenShiftNewApp"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "updatePeriodSeconds": 1,
+ "intervalSeconds": 1,
+ "timeoutSeconds": 600,
+ "maxUnavailable": "25%",
+ "maxSurge": "25%"
+ },
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "oso-rhel7-zagg-web"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "new-monitoring",
+ "name": "oso-rhel7-zagg-web:latest"
+ },
+ "lastTriggeredImage": "notused"
+ }
+ }
+ ],
+ "replicas": 10,
+ "test": false,
+ "selector": {
+ "deploymentconfig": "oso-rhel7-zagg-web"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "app": "oso-rhel7-ops-base",
+ "deploymentconfig": "oso-rhel7-zagg-web"
+ },
+ "annotations": {
+ "openshift.io/generated-by": "OpenShiftNewApp"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "monitoring-secrets",
+ "secret": {
+ "secretName": "monitoring-secrets"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "oso-rhel7-zagg-web",
+ "image": "notused",
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "monitoring-secrets",
+ "mountPath": "/secrets"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "Always",
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "terminationGracePeriodSeconds": 30,
+ "dnsPolicy": "ClusterFirst",
+ "securityContext": {}
+ }
+ }
+ }
+ }'''
+
+ post_dc = '''{
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "oso-rhel7-zagg-web",
+ "namespace": "new-monitoring",
+ "selfLink": "/oapi/v1/namespaces/new-monitoring/deploymentconfigs/oso-rhel7-zagg-web",
+ "uid": "f56e9dd2-7c13-11e6-b046-0e8844de0587",
+ "resourceVersion": "137095771",
+ "generation": 4,
+ "creationTimestamp": "2016-09-16T13:46:24Z",
+ "labels": {
+ "app": "oso-rhel7-ops-base",
+ "name": "oso-rhel7-zagg-web"
+ },
+ "annotations": {
+ "openshift.io/generated-by": "OpenShiftNewApp"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "updatePeriodSeconds": 1,
+ "intervalSeconds": 1,
+ "timeoutSeconds": 600,
+ "maxUnavailable": "25%",
+ "maxSurge": "25%"
+ },
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "oso-rhel7-zagg-web"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "new-monitoring",
+ "name": "oso-rhel7-zagg-web:latest"
+ },
+ "lastTriggeredImage": "notused"
+ }
+ }
+ ],
+ "replicas": 10,
+ "test": false,
+ "selector": {
+ "deploymentconfig": "oso-rhel7-zagg-web"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "app": "oso-rhel7-ops-base",
+ "deploymentconfig": "oso-rhel7-zagg-web"
+ },
+ "annotations": {
+ "openshift.io/generated-by": "OpenShiftNewApp"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "monitoring-secrets",
+ "secret": {
+ "secretName": "monitoring-secrets"
+ }
+ },
+ {
+ "name": "test-volume",
+ "persistentVolumeClaim": {
+ "claimName": "testclass",
+ "claimSize": "1G"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "oso-rhel7-zagg-web",
+ "image": "notused",
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "monitoring-secrets",
+ "mountPath": "/secrets"
+ },
+ {
+ "name": "test-volume",
+ "mountPath": "/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "Always",
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "terminationGracePeriodSeconds": 30,
+ "dnsPolicy": "ClusterFirst",
+ "securityContext": {}
+ }
+ }
+ }
+ }'''
+
+ mock_cmd.side_effect = [
+ (0, dc, ''),
+ (0, dc, ''),
+ (0, '', ''),
+ (0, post_dc, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCVolume.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ self.assertTrue(results['results']['results'][-1]['name'] == 'test-volume')
+
+ @mock.patch('oc_volume.Utils.create_tmpfile_copy')
+ @mock.patch('oc_volume.OCVolume._run')
+ def test_create_configmap(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing a label list '''
+ params = copy.deepcopy(OCVolumeTest.params)
+ params.update({'mount_path': '/configmap',
+ 'mount_type': 'configmap',
+ 'configmap_name': 'configtest',
+ 'vol_name': 'configvol'})
+
+ dc = '''{
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "oso-rhel7-zagg-web",
+ "namespace": "new-monitoring",
+ "selfLink": "/oapi/v1/namespaces/new-monitoring/deploymentconfigs/oso-rhel7-zagg-web",
+ "uid": "f56e9dd2-7c13-11e6-b046-0e8844de0587",
+ "resourceVersion": "137095771",
+ "generation": 4,
+ "creationTimestamp": "2016-09-16T13:46:24Z",
+ "labels": {
+ "app": "oso-rhel7-ops-base",
+ "name": "oso-rhel7-zagg-web"
+ },
+ "annotations": {
+ "openshift.io/generated-by": "OpenShiftNewApp"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "updatePeriodSeconds": 1,
+ "intervalSeconds": 1,
+ "timeoutSeconds": 600,
+ "maxUnavailable": "25%",
+ "maxSurge": "25%"
+ },
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "oso-rhel7-zagg-web"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "new-monitoring",
+ "name": "oso-rhel7-zagg-web:latest"
+ },
+ "lastTriggeredImage": "notused"
+ }
+ }
+ ],
+ "replicas": 10,
+ "test": false,
+ "selector": {
+ "deploymentconfig": "oso-rhel7-zagg-web"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "app": "oso-rhel7-ops-base",
+ "deploymentconfig": "oso-rhel7-zagg-web"
+ },
+ "annotations": {
+ "openshift.io/generated-by": "OpenShiftNewApp"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "monitoring-secrets",
+ "secret": {
+ "secretName": "monitoring-secrets"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "oso-rhel7-zagg-web",
+ "image": "notused",
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "monitoring-secrets",
+ "mountPath": "/secrets"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "Always",
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "terminationGracePeriodSeconds": 30,
+ "dnsPolicy": "ClusterFirst",
+ "securityContext": {}
+ }
+ }
+ }
+ }'''
+
+ post_dc = '''{
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "oso-rhel7-zagg-web",
+ "namespace": "new-monitoring",
+ "selfLink": "/oapi/v1/namespaces/new-monitoring/deploymentconfigs/oso-rhel7-zagg-web",
+ "uid": "f56e9dd2-7c13-11e6-b046-0e8844de0587",
+ "resourceVersion": "137095771",
+ "generation": 4,
+ "creationTimestamp": "2016-09-16T13:46:24Z",
+ "labels": {
+ "app": "oso-rhel7-ops-base",
+ "name": "oso-rhel7-zagg-web"
+ },
+ "annotations": {
+ "openshift.io/generated-by": "OpenShiftNewApp"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "updatePeriodSeconds": 1,
+ "intervalSeconds": 1,
+ "timeoutSeconds": 600,
+ "maxUnavailable": "25%",
+ "maxSurge": "25%"
+ },
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "oso-rhel7-zagg-web"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "new-monitoring",
+ "name": "oso-rhel7-zagg-web:latest"
+ },
+ "lastTriggeredImage": "notused"
+ }
+ }
+ ],
+ "replicas": 10,
+ "test": false,
+ "selector": {
+ "deploymentconfig": "oso-rhel7-zagg-web"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "app": "oso-rhel7-ops-base",
+ "deploymentconfig": "oso-rhel7-zagg-web"
+ },
+ "annotations": {
+ "openshift.io/generated-by": "OpenShiftNewApp"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "monitoring-secrets",
+ "secret": {
+ "secretName": "monitoring-secrets"
+ }
+ },
+ {
+ "name": "configvol",
+ "configMap": {
+ "name": "configtest"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "oso-rhel7-zagg-web",
+ "image": "notused",
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "monitoring-secrets",
+ "mountPath": "/secrets"
+ },
+ {
+ "name": "configvol",
+ "mountPath": "/configmap"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "Always",
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "terminationGracePeriodSeconds": 30,
+ "dnsPolicy": "ClusterFirst",
+ "securityContext": {}
+ }
+ }
+ }
+ }'''
+
+ mock_cmd.side_effect = [
+ (0, dc, ''),
+ (0, dc, ''),
+ (0, '', ''),
+ (0, post_dc, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCVolume.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ self.assertTrue(results['results']['results'][-1]['name'] == 'configvol')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_path_exists.side_effect = lambda _: False
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY3, 'py2 test only')
+ @mock.patch('os.path.exists')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_path_exists.side_effect = lambda f: f == oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup fallback '''
+
+ mock_env_get.side_effect = lambda _v, _d: ''
+
+ mock_shutil_which.side_effect = lambda _f, path=None: None
+
+ self.assertEqual(locate_oc_binary(), 'oc')
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in path '''
+
+ oc_bin = '/usr/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in /usr/local/bin '''
+
+ oc_bin = '/usr/local/bin/oc'
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @unittest.skipIf(six.PY2, 'py3 test only')
+ @mock.patch('shutil.which')
+ @mock.patch('os.environ.get')
+ def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
+ ''' Testing binary lookup in ~/bin '''
+
+ oc_bin = os.path.expanduser('~/bin/oc')
+
+ mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+ mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+ self.assertEqual(locate_oc_binary(), oc_bin)
diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py
index a2ae6b4f6..9adaeeb52 100644
--- a/roles/lib_utils/library/yedit.py
+++ b/roles/lib_utils/library/yedit.py
@@ -180,13 +180,27 @@ EXAMPLES = '''
# a:
# b:
# c: d
+#
+# multiple edits at the same time
+- name: perform multiple edits
+ yedit:
+ src: somefile.yml
+ edits:
+ - key: a#b#c
+ value: d
+ - key: a#b#c#d
+ value: e
+ state: present
+# Results:
+# a:
+# b:
+# c:
+# d: e
'''
# -*- -*- -*- End included fragment: doc/yedit -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/yedit.py -*- -*- -*-
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
class YeditException(Exception):
@@ -220,13 +234,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -242,13 +256,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -270,7 +284,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -359,7 +373,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -459,7 +473,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -578,8 +592,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -640,7 +654,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -666,7 +690,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -698,114 +722,149 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
- if module.params['src']:
+ state = params['state']
+
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
- if rval[0] and module.params['src']:
+ elif params['edits'] is not None:
+ edits = params['edits']
+
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: class/yedit.py -*- -*- -*-
@@ -837,12 +896,34 @@ def main():
type='str'),
backup=dict(default=True, type='bool'),
separator=dict(default='.', type='str'),
+ edits=dict(default=None, type='list'),
),
mutually_exclusive=[["curr_value", "index"], ['update', "append"]],
required_one_of=[["content", "src"]],
)
- rval = Yedit.run_ansible(module)
+ # Verify we recieved either a valid key or edits with valid keys when receiving a src file.
+ # A valid key being not None or not ''.
+ if module.params['src'] is not None:
+ key_error = False
+ edit_error = False
+
+ if module.params['key'] in [None, '']:
+ key_error = True
+
+ if module.params['edits'] in [None, []]:
+ edit_error = True
+
+ else:
+ for edit in module.params['edits']:
+ if edit.get('key') in [None, '']:
+ edit_error = True
+ break
+
+ if key_error and edit_error:
+ module.fail_json(failed=True, msg='Empty value for parameter key not allowed.')
+
+ rval = Yedit.run_ansible(module.params)
if 'failed' in rval and rval['failed']:
module.fail_json(**rval)
diff --git a/roles/lib_utils/src/ansible/yedit.py b/roles/lib_utils/src/ansible/yedit.py
index 8a1a7c2dc..c4b818cf1 100644
--- a/roles/lib_utils/src/ansible/yedit.py
+++ b/roles/lib_utils/src/ansible/yedit.py
@@ -26,12 +26,34 @@ def main():
type='str'),
backup=dict(default=True, type='bool'),
separator=dict(default='.', type='str'),
+ edits=dict(default=None, type='list'),
),
mutually_exclusive=[["curr_value", "index"], ['update', "append"]],
required_one_of=[["content", "src"]],
)
- rval = Yedit.run_ansible(module)
+ # Verify we recieved either a valid key or edits with valid keys when receiving a src file.
+ # A valid key being not None or not ''.
+ if module.params['src'] is not None:
+ key_error = False
+ edit_error = False
+
+ if module.params['key'] in [None, '']:
+ key_error = True
+
+ if module.params['edits'] in [None, []]:
+ edit_error = True
+
+ else:
+ for edit in module.params['edits']:
+ if edit.get('key') in [None, '']:
+ edit_error = True
+ break
+
+ if key_error and edit_error:
+ module.fail_json(failed=True, msg='Empty value for parameter key not allowed.')
+
+ rval = Yedit.run_ansible(module.params)
if 'failed' in rval and rval['failed']:
module.fail_json(**rval)
diff --git a/roles/lib_utils/src/class/yedit.py b/roles/lib_utils/src/class/yedit.py
index 533665db2..e0a27012f 100644
--- a/roles/lib_utils/src/class/yedit.py
+++ b/roles/lib_utils/src/class/yedit.py
@@ -1,6 +1,5 @@
# flake8: noqa
-# pylint: disable=undefined-variable,missing-docstring
-# noqa: E301,E302
+# pylint: skip-file
class YeditException(Exception):
@@ -34,13 +33,13 @@ class Yedit(object):
@property
def separator(self):
- ''' getter method for yaml_dict '''
+ ''' getter method for separator '''
return self._separator
@separator.setter
- def separator(self):
- ''' getter method for yaml_dict '''
- return self._separator
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
@property
def yaml_dict(self):
@@ -56,13 +55,13 @@ class Yedit(object):
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
- return re.findall(Yedit.re_key % ''.join(common_separators), key)
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
- if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@@ -84,7 +83,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -173,7 +172,7 @@ class Yedit(object):
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
+ data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
@@ -273,7 +272,7 @@ class Yedit(object):
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
- raise YeditException('Problem with loading yaml file. %s' % err)
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
@@ -392,8 +391,8 @@ class Yedit(object):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
- raise YeditException('Cannot replace key, value entry in ' +
- 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
@@ -454,7 +453,17 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if not result:
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
@@ -480,7 +489,7 @@ class Yedit(object):
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
- if result:
+ if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
@@ -512,112 +521,147 @@ class Yedit(object):
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
# If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
+ elif isinstance(inc_value, str) and 'str' not in vtype:
try:
- inc_value = yaml.load(inc_value)
+ inc_value = yaml.safe_load(inc_value)
except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
- def run_ansible(module):
+ def run_ansible(params):
'''perform the idempotent crud operations'''
- yamlfile = Yedit(filename=module.params['src'],
- backup=module.params['backup'],
- separator=module.params['separator'])
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
- if module.params['src']:
+ if params['src']:
rval = yamlfile.load()
- if yamlfile.yaml_dict is None and \
- module.params['state'] != 'present':
+ if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
- 'msg': 'Error opening file [%s]. Verify that the ' +
- 'file exists, that it is has correct' +
- ' permissions, and is valid yaml.'}
-
- if module.params['state'] == 'list':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it is has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['key']:
- rval = yamlfile.get(module.params['key']) or {}
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
- return {'changed': False, 'result': rval, 'state': "list"}
+ return {'changed': False, 'result': rval, 'state': state}
- elif module.params['state'] == 'absent':
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
- if module.params['update']:
- rval = yamlfile.pop(module.params['key'],
- module.params['value'])
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
else:
- rval = yamlfile.delete(module.params['key'])
+ rval = yamlfile.delete(params['key'])
- if rval[0] and module.params['src']:
+ if rval[0] and params['src']:
yamlfile.write()
- return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
- elif module.params['state'] == 'present':
+ elif state == 'present':
# check if content is different than what is in the file
- if module.params['content']:
- content = Yedit.parse_value(module.params['content'],
- module.params['content_type'])
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
- module.params['value'] is None:
- return {'changed': False,
- 'result': yamlfile.yaml_dict,
- 'state': "present"}
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
- # we were passed a value; parse it
- if module.params['value']:
- value = Yedit.parse_value(module.params['value'],
- module.params['value_type'])
- key = module.params['key']
- if module.params['update']:
- # pylint: disable=line-too-long
- curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
- module.params['curr_value_format']) # noqa: E501
+ # If we were passed a key, value then
+ # we enapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
- rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
- elif module.params['append']:
- rval = yamlfile.append(key, value)
- else:
- rval = yamlfile.put(key, value)
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
- if rval[0] and module.params['src']:
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
yamlfile.write()
- return {'changed': rval[0],
- 'result': rval[1], 'state': "present"}
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
- if module.params['src']:
+ if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
- 'state': "present"}
+ 'state': state}
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
diff --git a/roles/lib_utils/src/doc/yedit b/roles/lib_utils/src/doc/yedit
index 16b44943e..82af1f675 100644
--- a/roles/lib_utils/src/doc/yedit
+++ b/roles/lib_utils/src/doc/yedit
@@ -135,4 +135,20 @@ EXAMPLES = '''
# a:
# b:
# c: d
+#
+# multiple edits at the same time
+- name: perform multiple edits
+ yedit:
+ src: somefile.yml
+ edits:
+ - key: a#b#c
+ value: d
+ - key: a#b#c#d
+ value: e
+ state: present
+# Results:
+# a:
+# b:
+# c:
+# d: e
'''
diff --git a/roles/lib_utils/src/test/integration/kube-manager-test.yaml.orig b/roles/lib_utils/src/test/integration/kube-manager-test.yaml.orig
deleted file mode 100644
index 5541c3dae..000000000
--- a/roles/lib_utils/src/test/integration/kube-manager-test.yaml.orig
+++ /dev/null
@@ -1,52 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
- name: kube-controller-manager
- namespace: kube-system
-spec:
- hostNetwork: true
- containers:
- - name: kube-controller-manager
- image: openshift/kube:v1.0.0
- command:
- - /hyperkube
- - controller-manager
- - --master=http://127.0.0.1:8080
- - --leader-elect=true
- - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
- - --root-ca-file=/etc/k8s/ssl/my.pem
- - --my-new-parameter=openshift
- livenessProbe:
- httpGet:
- host: 127.0.0.1
- path: /healthz
- port: 10252
- initialDelaySeconds: 15
- timeoutSeconds: 1
- volumeMounts:
- - mountPath: /etc/kubernetes/ssl
- name: ssl-certs-kubernetes
- readOnly: true
- - mountPath: /etc/ssl/certs
- name: ssl-certs-host
- readOnly: 'true'
- volumes:
- - hostPath:
- path: /etc/kubernetes/ssl
- name: ssl-certs-kubernetes
- - hostPath:
- path: /usr/share/ca-certificates
- name: ssl-certs-host
-yedittest: yedittest
-metadata-namespace: openshift-is-awesome
-nonexistingkey:
-- --my-new-parameter=openshift
-a:
- b:
- c: d
-e:
- f:
- g:
- h:
- i:
- j: k
diff --git a/roles/lib_utils/src/test/integration/yedit.yml b/roles/lib_utils/src/test/integration/yedit.yml
index e3dfd490b..65209bade 100755
--- a/roles/lib_utils/src/test/integration/yedit.yml
+++ b/roles/lib_utils/src/test/integration/yedit.yml
@@ -219,4 +219,33 @@
assert:
that: results.result == [1, 2, 3]
msg: "Test: '[1, 2, 3]' != [{{ results.result }}]"
-###### end test create list value #####
+ ###### end test create list value #####
+
+ ###### test create multiple list value #####
+ - name: test multiple edits
+ yedit:
+ src: "{{ test_file }}"
+ edits:
+ - key: z.x.y
+ value:
+ - 1
+ - 2
+ - 3
+ - key: z.x.y
+ value: 4
+ action: append
+
+ - name: retrieve the key
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: z#x#y
+ separator: '#'
+ register: results
+ - debug: var=results
+
+ - name: Assert that the key was created
+ assert:
+ that: results.result == [1, 2, 3, 4]
+ msg: "Test: '[1, 2, 3, 4]' != [{{ results.result }}]"
+ ###### end test create multiple list value #####
diff --git a/roles/lib_utils/src/test/unit/test_yedit.py b/roles/lib_utils/src/test/unit/test_yedit.py
index 23a3f7353..f9f42843a 100755
--- a/roles/lib_utils/src/test/unit/test_yedit.py
+++ b/roles/lib_utils/src/test/unit/test_yedit.py
@@ -5,6 +5,7 @@
import os
import sys
import unittest
+import mock
# Removing invalid variable names for tests so that I can
# keep them brief
@@ -277,6 +278,91 @@ class YeditTest(unittest.TestCase):
with self.assertRaises(YeditException):
yed.put('new.stuff.here[0]', 'item')
+ def test_empty_key_with_int_value(self):
+ '''test editing top level with not list or dict'''
+ yed = Yedit(content={'a': {'b': 12}})
+ result = yed.put('', 'b')
+ self.assertFalse(result[0])
+
+ def test_setting_separator(self):
+ '''test editing top level with not list or dict'''
+ yed = Yedit(content={'a': {'b': 12}})
+ yed.separator = ':'
+ self.assertEqual(yed.separator, ':')
+
+ def test_remove_all(self):
+ '''test removing all data'''
+ data = Yedit.remove_entry({'a': {'b': 12}}, '')
+ self.assertTrue(data)
+
+ def test_remove_list_entry(self):
+ '''test removing list entry'''
+ data = {'a': {'b': [{'c': 3}]}}
+ results = Yedit.remove_entry(data, 'a.b[0]')
+ self.assertTrue(results)
+ self.assertTrue(data, {'a': {'b': []}})
+
+ def test_parse_value_string_true(self):
+ '''test parse_value'''
+ results = Yedit.parse_value('true', 'str')
+ self.assertEqual(results, 'true')
+
+ def test_parse_value_bool_true(self):
+ '''test parse_value'''
+ results = Yedit.parse_value('true', 'bool')
+ self.assertTrue(results)
+
+ def test_parse_value_bool_exception(self):
+ '''test parse_value'''
+ with self.assertRaises(YeditException):
+ Yedit.parse_value('TTT', 'bool')
+
+ @mock.patch('yedit.Yedit.write')
+ def test_run_ansible_basic(self, mock_write):
+ '''test parse_value'''
+ params = {
+ 'src': None,
+ 'backup': False,
+ 'separator': '.',
+ 'state': 'present',
+ 'edits': [],
+ 'value': None,
+ 'key': None,
+ 'content': {'a': {'b': {'c': 1}}},
+ 'content_type': '',
+ }
+
+ results = Yedit.run_ansible(params)
+
+ mock_write.side_effect = [
+ (True, params['content']),
+ ]
+
+ self.assertFalse(results['changed'])
+
+ @mock.patch('yedit.Yedit.write')
+ def test_run_ansible_and_write(self, mock_write):
+ '''test parse_value'''
+ params = {
+ 'src': '/tmp/test',
+ 'backup': False,
+ 'separator': '.',
+ 'state': 'present',
+ 'edits': [],
+ 'value': None,
+ 'key': None,
+ 'content': {'a': {'b': {'c': 1}}},
+ 'content_type': '',
+ }
+
+ results = Yedit.run_ansible(params)
+
+ mock_write.side_effect = [
+ (True, params['content']),
+ ]
+
+ self.assertTrue(results['changed'])
+
def tearDown(self):
'''TearDown method'''
os.unlink(YeditTest.filename)
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index d211d30e8..fefd28bbd 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -22,6 +22,15 @@
- nuage.key
- nuage.kubeconfig
+- name: Copy the certificates and keys
+ become: yes
+ copy: src="/tmp/{{ item }}" dest="{{ cert_output_dir }}/{{ item }}"
+ with_items:
+ - ca.crt
+ - nuage.crt
+ - nuage.key
+ - nuage.kubeconfig
+
- include: certificates.yml
- name: Create nuage-openshift-monitor.yaml
diff --git a/roles/nuage_master/tasks/serviceaccount.yml b/roles/nuage_master/tasks/serviceaccount.yml
index 16ea08244..eee448e2c 100644
--- a/roles/nuage_master/tasks/serviceaccount.yml
+++ b/roles/nuage_master/tasks/serviceaccount.yml
@@ -3,14 +3,20 @@
command: mktemp -u /tmp/openshift-ansible-XXXXXXX.kubeconfig
register: nuage_tmp_conf_mktemp
changed_when: False
+ run_once: True
+ delegate_to: "{{ nuage_ca_master }}"
- set_fact:
nuage_tmp_conf: "{{ nuage_tmp_conf_mktemp.stdout }}"
+ run_once: True
+ delegate_to: "{{ nuage_ca_master }}"
- name: Copy Configuration to temporary conf
command: >
cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{nuage_tmp_conf}}
changed_when: false
+ run_once: True
+ delegate_to: "{{ nuage_ca_master }}"
- name: Create Admin Service Account
oc_serviceaccount:
@@ -18,6 +24,8 @@
name: nuage
namespace: default
state: present
+ run_once: True
+ delegate_to: "{{ nuage_ca_master }}"
- name: Configure role/user permissions
command: >
@@ -27,6 +35,8 @@
register: osnuage_perm_task
failed_when: "'the object has been modified' not in osnuage_perm_task.stderr and osnuage_perm_task.rc != 0"
changed_when: osnuage_perm_task.rc == 0
+ run_once: True
+ delegate_to: "{{ nuage_ca_master }}"
- name: Generate the node client config
command: >
@@ -40,8 +50,12 @@
--signer-serial={{ openshift_master_ca_serial }}
--basename='nuage'
--user={{ nuage_service_account }}
+ delegate_to: "{{ nuage_ca_master }}"
+ run_once: True
- name: Clean temporary configuration file
command: >
rm -f {{nuage_tmp_conf}}
changed_when: false
+ delegate_to: "{{ nuage_ca_master }}"
+ run_once: True
diff --git a/roles/openshift_ca/README.md b/roles/openshift_ca/README.md
index 96c9cd5f2..dfbe81c6c 100644
--- a/roles/openshift_ca/README.md
+++ b/roles/openshift_ca/README.md
@@ -19,6 +19,8 @@ From this role:
| openshift_ca_key | `{{ openshift_ca_config_dir }}/ca.key` | CA key path including CA key filename. |
| openshift_ca_serial | `{{ openshift_ca_config_dir }}/ca.serial.txt` | CA serial path including CA serial filename. |
| openshift_version | `{{ openshift_pkg_version }}` | OpenShift package version. |
+| openshift_master_cert_expire_days | `730` (2 years) | Validity of the certificates in days. Works only with OpenShift version 1.5 (3.5) and later. |
+| openshift_ca_cert_expire_days | `1825` (5 years) | Validity of the CA certificates in days. Works only with OpenShift version 1.5 (3.5) and later. |
Dependencies
------------
diff --git a/roles/openshift_ca/defaults/main.yml b/roles/openshift_ca/defaults/main.yml
new file mode 100644
index 000000000..ecfcc88b3
--- /dev/null
+++ b/roles/openshift_ca/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+openshift_ca_cert_expire_days: 1825
+openshift_master_cert_expire_days: 730
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index 70c2a9121..3b17d9ed6 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -88,7 +88,7 @@
# This should NOT replace the CA due to --overwrite=false when a CA already exists.
- name: Create the master certificates if they do not already exist
command: >
- {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-master-certs
+ {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-master-certs
{% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
@@ -99,6 +99,10 @@
--master={{ openshift.master.api_url }}
--public-master={{ openshift.master.public_api_url }}
--cert-dir={{ openshift_ca_config_dir }}
+ {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
+ --expire-days={{ openshift_master_cert_expire_days }}
+ --signer-expire-days={{ openshift_ca_cert_expire_days }}
+ {% endif %}
--overwrite=false
when: master_ca_missing | bool or openshift_certificates_redeploy | default(false) | bool
delegate_to: "{{ openshift_ca_host }}"
diff --git a/roles/openshift_certificate_expiry/README.md b/roles/openshift_certificate_expiry/README.md
index df43c3770..107e27f89 100644
--- a/roles/openshift_certificate_expiry/README.md
+++ b/roles/openshift_certificate_expiry/README.md
@@ -19,7 +19,6 @@ to be used with an inventory that is representative of the
cluster. For best results run `ansible-playbook` with the `-v` option.
-
# Role Variables
Core variables in this role:
@@ -51,8 +50,8 @@ How to use the Certificate Expiration Checking Role.
Run one of the example playbooks using an inventory file
representative of your existing cluster. Some example playbooks are
-included in this role, or you can read on below after this example to
-craft you own.
+included in this role, or you can [read on below for more examples](#more-example-playbooks)
+to help you craft you own.
```
$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/easy-mode.yaml
@@ -69,11 +68,47 @@ Using the `easy-mode.yaml` playbook will produce:
> `/usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/easy-mode.yaml`
> instead
+## Run from a container
+
+The example playbooks that use this role are packaged in the
+[container image for openshift-ansible](../../README_CONTAINER_IMAGE.md), so you
+can run any of them by setting the `PLAYBOOK_FILE` environment variable when
+running an openshift-ansible container.
+
+There are several [examples](../../examples/README.md) in the `examples` directory that run certificate check playbooks from a container running on OpenShift.
+
## More Example Playbooks
> **Note:** These Playbooks are available to run directly out of the
> [/playbooks/certificate_expiry/](../../playbooks/certificate_expiry/) directory.
+### Default behavior
+
+This playbook just invokes the certificate expiration check role with default options:
+
+
+```yaml
+---
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ roles:
+ - role: openshift_certificate_expiry
+```
+
+**From git:**
+```
+$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/default.yaml
+```
+**From openshift-ansible-playbooks rpm:**
+```
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/default.yaml
+```
+
+> [View This Playbook](../../playbooks/certificate_expiry/default.yaml)
+
+### Easy mode
This example playbook is great if you're just wanting to **try the
role out**. This playbook enables HTML and JSON reports. All
@@ -104,35 +139,70 @@ $ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/ce
> [View This Playbook](../../playbooks/certificate_expiry/easy-mode.yaml)
-***
+### Easy mode and upload reports to masters
+
+This example builds on top of [easy-mode.yaml](#easy-mode) and additionally
+uploads a copy of the generated reports to the masters, with a timestamp in the
+file names.
+
+This is specially useful when the playbook runs from within a container, because
+the reports are generated inside the container and we need a way to access them.
+Uploading a copy of the reports to the masters is one way to make it easy to
+access them. Alternatively you can use the
+[role variables](#role-variables) that control the path of the generated reports
+to point to a container volume (see the [playbook with custom paths](#generate-html-and-json-reports-in-a-custom-path) for an example).
-Default behavior:
+With the container use case in mind, this playbook allows control over some
+options via environment variables:
+
+ - `CERT_EXPIRY_WARN_DAYS`: sets `openshift_certificate_expiry_warning_days`, overriding the role's default.
+ - `COPY_TO_PATH`: path in the masters where generated reports are uploaded.
```yaml
---
-- name: Check cert expirys
+- name: Generate certificate expiration reports
hosts: nodes:masters:etcd
- become: yes
gather_facts: no
+ vars:
+ openshift_certificate_expiry_save_json_results: yes
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_show_all: yes
+ openshift_certificate_expiry_warning_days: "{{ lookup('env', 'CERT_EXPIRY_WARN_DAYS') | default('45', true) }}"
roles:
- role: openshift_certificate_expiry
+
+- name: Upload reports to master
+ hosts: masters
+ gather_facts: no
+ vars:
+ destination_path: "{{ lookup('env', 'COPY_TO_PATH') | default('/etc/origin/certificate_expiration_report', true) }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}"
+ tasks:
+ - name: Create directory in masters
+ file:
+ path: "{{ destination_path }}"
+ state: directory
+ - name: Copy the reports to the masters
+ copy:
+ dest: "{{ destination_path }}/{{ timestamp }}-{{ item }}"
+ src: "/tmp/{{ item }}"
+ with_items:
+ - "cert-expiry-report.html"
+ - "cert-expiry-report.json"
```
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/easy-mode-upload.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/easy-mode-upload.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/default.yaml)
+> [View This Playbook](../../playbooks/certificate_expiry/easy-mode-upload.yaml)
-***
-
-
-Generate HTML and JSON artifacts in their default paths:
+### Generate HTML and JSON artifacts in their default paths
```yaml
---
@@ -158,7 +228,38 @@ $ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/ce
> [View This Playbook](../../playbooks/certificate_expiry/html_and_json_default_paths.yaml)
-***
+### Generate HTML and JSON reports in a custom path
+
+This example customizes the report generation path to point to a specific path (`/var/lib/certcheck`) and uses a date timestamp for the generated files. This allows you to reuse a certain location to keep multiple copies of the reports.
+
+```yaml
+---
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_save_json_results: yes
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}"
+ openshift_certificate_expiry_html_report_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.html"
+ openshift_certificate_expiry_json_results_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.json"
+ roles:
+ - role: openshift_certificate_expiry
+```
+
+**From git:**
+```
+$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/html_and_json_timestamp.yaml
+```
+**From openshift-ansible-playbooks rpm:**
+```
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/html_and_json_timestamp.yaml
+```
+
+> [View This Playbook](../../playbooks/certificate_expiry/html_and_json_timestamp.yaml)
+
+### Long warning window
Change the expiration warning window to 1500 days (good for testing
the module out):
@@ -186,7 +287,7 @@ $ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/ce
> [View This Playbook](../../playbooks/certificate_expiry/longer_warning_period.yaml)
-***
+### Long warning window and JSON report
Change the expiration warning window to 1500 days (good for testing
the module out) and save the results as a JSON file:
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
index 33a4faf3e..c204b5341 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -10,8 +10,9 @@ import os
import subprocess
import yaml
-from six.moves import configparser
-
+# pylint import-error disabled because pylint cannot find the package
+# when installed in a virtualenv
+from ansible.module_utils.six.moves import configparser # pylint: disable=import-error
from ansible.module_utils.basic import AnsibleModule
try:
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index e55c288a8..d9ccf87bc 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -24,6 +24,14 @@
when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool
- fail:
+ msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
+
+- fail:
+ msg: Calico cannot currently be used with Flannel in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both
+ when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
+
+- fail:
msg: openshift_hostname must be 64 characters or less
when: openshift_hostname is defined and openshift_hostname | length > 64
@@ -35,6 +43,7 @@
use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
use_flannel: "{{ openshift_use_flannel | default(None) }}"
+ use_calico: "{{openshift_use_calico | default(None) }}"
use_nuage: "{{ openshift_use_nuage | default(None) }}"
use_contiv: "{{ openshift_use_contiv | default(None) }}"
use_manageiq: "{{ openshift_use_manageiq | default(None) }}"
diff --git a/roles/openshift_serviceaccounts/meta/main.yml b/roles/openshift_etcd_ca/meta/main.yml
index 7a30c220f..d73d27356 100644
--- a/roles/openshift_serviceaccounts/meta/main.yml
+++ b/roles/openshift_etcd_ca/meta/main.yml
@@ -1,16 +1,17 @@
---
galaxy_info:
- author: OpenShift Operations
- description: OpenShift Service Accounts
+ author: Tim Bielawa
+ description: Meta role around the etcd_ca role
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.9
+ min_ansible_version: 2.2
platforms:
- name: EL
versions:
- 7
categories:
- cloud
+ - system
dependencies:
-- { role: openshift_facts }
-- { role: lib_openshift }
+- role: openshift_etcd_facts
+- role: etcd_ca
diff --git a/roles/openshift_etcd_ca/tasks/main.yml b/roles/openshift_etcd_ca/tasks/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/openshift_etcd_ca/tasks/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index e3cc3a9b4..0f2bec6d3 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -6,7 +6,7 @@
# This script should be run from openshift-ansible/roles/openshift_examples
XPAAS_VERSION=ose-v1.3.6
-ORIGIN_VERSION=${1:-v1.5}
+ORIGIN_VERSION=${1:-v1.6}
RHAMP_TAG=1.0.0.GA
RHAMP_TEMPLATE=https://raw.githubusercontent.com/3scale/rhamp-openshift-templates/${RHAMP_TAG}/apicast-gateway/apicast-gateway-template.yml
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
@@ -30,12 +30,15 @@ mv application-templates-${XPAAS_VERSION}/jboss-image-streams.json ${EXAMPLES_BA
mv application-templates-GA/fis-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/fis-image-streams.json
mv application-templates-GA/quickstarts/* ${EXAMPLES_BASE}/xpaas-templates/
find application-templates-${XPAAS_VERSION}/ -name '*.json' ! -wholename '*secret*' ! -wholename '*demo*' -exec mv {} ${EXAMPLES_BASE}/xpaas-templates/ \;
+popd
+
wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/dotnet_imagestreams.json -O ${EXAMPLES_BASE}/image-streams/dotnet_imagestreams.json
+wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/templates/dotnet-example.json -O ${EXAMPLES_BASE}/quickstart-templates/dotnet-example.json
+wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/templates/dotnet-pgsql-persistent.json -O ${EXAMPLES_BASE}/quickstart-templates/dotnet-pgsql-persistent.json
+wget ${RHAMP_TEMPLATE} -O ${EXAMPLES_BASE}/quickstart-templates/apicast-gateway-template.yml
wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/origin/metrics-deployer.yaml
wget https://raw.githubusercontent.com/openshift/origin-metrics/enterprise/metrics.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/enterprise/metrics-deployer.yaml
wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/master/deployer/deployer.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/origin/logging-deployer.yaml
wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/enterprise/deployment/deployer.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/enterprise/logging-deployer.yaml
-wget ${RHAMP_TEMPLATE} -O ${EXAMPLES_BASE}/quickstart-templates/apicast-gateway-template.yml
-popd
git diff files/examples
diff --git a/roles/openshift_examples/files/examples/latest b/roles/openshift_examples/files/examples/latest
index 59b6ef75f..536385712 120000
--- a/roles/openshift_examples/files/examples/latest
+++ b/roles/openshift_examples/files/examples/latest
@@ -1 +1 @@
-v1.5 \ No newline at end of file
+v1.6 \ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/redis-ephemeral-template.json
index 82a09a3ec..e9af50937 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/redis-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/redis-ephemeral-template.json
@@ -21,6 +21,16 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-password" : "${REDIS_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -117,7 +127,12 @@
"env": [
{
"name": "REDIS_PASSWORD",
- "value": "${REDIS_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/redis-persistent-template.json
index 1d5f59188..aa27578a9 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/redis-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/redis-persistent-template.json
@@ -21,6 +21,16 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-password" : "${REDIS_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -134,7 +144,12 @@
"env": [
{
"name": "REDIS_PASSWORD",
- "value": "${REDIS_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v1.5/image-streams/dotnet_imagestreams.json b/roles/openshift_examples/files/examples/v1.5/image-streams/dotnet_imagestreams.json
index 0d5ac21d8..857ffa980 100644
--- a/roles/openshift_examples/files/examples/v1.5/image-streams/dotnet_imagestreams.json
+++ b/roles/openshift_examples/files/examples/v1.5/image-streams/dotnet_imagestreams.json
@@ -27,8 +27,9 @@
"iconClass": "icon-dotnet",
"tags": "builder,.net,dotnet,dotnetcore",
"supports":"dotnet",
- "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore.git",
- "sampleContextDir": "1.1/test/asp-net-hello-world"
+ "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git",
+ "sampleContextDir": "app",
+ "sampleRef": "dotnetcore-1.1"
},
"from": {
"kind": "ImageStreamTag",
@@ -43,8 +44,9 @@
"iconClass": "icon-dotnet",
"tags": "builder,.net,dotnet,dotnetcore,rh-dotnetcore11",
"supports":"dotnet:1.1,dotnet",
- "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore.git",
- "sampleContextDir": "1.1/test/asp-net-hello-world",
+ "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git",
+ "sampleContextDir": "app",
+ "sampleRef": "dotnetcore-1.1",
"version": "1.1"
},
"from": {
@@ -60,8 +62,9 @@
"iconClass": "icon-dotnet",
"tags": "builder,.net,dotnet,dotnetcore,rh-dotnetcore10",
"supports":"dotnet:1.0,dotnet",
- "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore.git",
- "sampleContextDir": "1.0/test/asp-net-hello-world",
+ "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git",
+ "sampleContextDir": "app",
+ "sampleRef": "dotnetcore-1.0",
"version": "1.0"
},
"from": {
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-example.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-example.json
new file mode 100644
index 000000000..a09d71a00
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-example.json
@@ -0,0 +1,333 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dotnet-example",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core",
+ "description": "An example .NET Core application.",
+ "tags": "quickstart,dotnet,.net",
+ "iconClass": "icon-dotnet",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/redhat-developer/s2i-dotnetcore",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "objects": [
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "${DOTNET_IMAGE_STREAM_TAG}"
+ },
+ "env": [
+ {
+ "name": "DOTNET_STARTUP_PROJECT",
+ "value": "${DOTNET_STARTUP_PROJECT}"
+ },
+ {
+ "name": "DOTNET_ASSEMBLY_NAME",
+ "value": "${DOTNET_ASSEMBLY_NAME}"
+ },
+ {
+ "name": "DOTNET_NPM_TOOLS",
+ "value": "${DOTNET_NPM_TOOLS}"
+ },
+ {
+ "name": "DOTNET_TEST_PROJECTS",
+ "value": "${DOTNET_TEST_PROJECTS}"
+ },
+ {
+ "name": "DOTNET_CONFIGURATION",
+ "value": "${DOTNET_CONFIGURATION}"
+ },
+ {
+ "name": "DOTNET_PUBLISH",
+ "value": "true"
+ },
+ {
+ "name": "DOTNET_RESTORE_SOURCES",
+ "value": "${DOTNET_RESTORE_SOURCES}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "dotnet-app"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "dotnet-app",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "livenessProbe": {
+ "httpGet": {
+ "path": "/",
+ "port": 8080,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 40,
+ "timeoutSeconds": 15
+ },
+ "readinessProbe": {
+ "httpGet": {
+ "path": "/",
+ "port": 8080,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 10,
+ "timeoutSeconds": 30
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "env": []
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "dotnet-example"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "DOTNET_IMAGE_STREAM_TAG",
+ "displayName": ".NET builder",
+ "required": true,
+ "description": "The image stream tag which is used to build the code.",
+ "value": "dotnet:1.0"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "value": "dotnetcore-1.0"
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to use a subdirectory of the source code repository"
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the .NET Core service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DOTNET_STARTUP_PROJECT",
+ "displayName": "Startup Project",
+ "description": "Set this to the folder containing your startup project.",
+ "value": "app"
+ },
+ {
+ "name": "DOTNET_ASSEMBLY_NAME",
+ "displayName": "Startup Assembly",
+ "description": "Set this when the assembly name is overridden in the project file."
+ },
+ {
+ "name": "DOTNET_NPM_TOOLS",
+ "displayName": "Npm Tools",
+ "description": "Set this to a space separated list of npm tools needed to publish.",
+ "value": "bower gulp"
+ },
+ {
+ "name": "DOTNET_TEST_PROJECTS",
+ "displayName": "Test projects",
+ "description": "Set this to a space separated list of test projects to run before publishing."
+ },
+ {
+ "name": "DOTNET_CONFIGURATION",
+ "displayName": "Configuration",
+ "description": "Set this to configuration (Release/Debug).",
+ "value": "Release"
+ },
+ {
+ "name": "DOTNET_RESTORE_SOURCES",
+ "displayName": "NuGet package sources",
+ "description": "Set this to override the NuGet.config sources."
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json
new file mode 100644
index 000000000..fa31f7f61
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json
@@ -0,0 +1,544 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dotnet-pgsql-persistent",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core + PostgreSQL (Persistent)",
+ "description": "An example .NET Core application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore.",
+ "tags": "quickstart,dotnet",
+ "iconClass": "icon-dotnet",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/redhat-developer/s2i-dotnetcore",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore.",
+ "labels": {
+ "template": "dotnet-pgsql-persistent"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "${DOTNET_IMAGE_STREAM_TAG}"
+ },
+ "env": [
+ {
+ "name": "DOTNET_STARTUP_PROJECT",
+ "value": "${DOTNET_STARTUP_PROJECT}"
+ },
+ {
+ "name": "DOTNET_ASSEMBLY_NAME",
+ "value": "${DOTNET_ASSEMBLY_NAME}"
+ },
+ {
+ "name": "DOTNET_NPM_TOOLS",
+ "value": "${DOTNET_NPM_TOOLS}"
+ },
+ {
+ "name": "DOTNET_TEST_PROJECTS",
+ "value": "${DOTNET_TEST_PROJECTS}"
+ },
+ {
+ "name": "DOTNET_CONFIGURATION",
+ "value": "${DOTNET_CONFIGURATION}"
+ },
+ {
+ "name": "DOTNET_PUBLISH",
+ "value": "true"
+ },
+ {
+ "name": "DOTNET_RESTORE_SOURCES",
+ "value": "${DOTNET_RESTORE_SOURCES}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {}
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "updatePeriodSeconds": 1,
+ "intervalSeconds": 1,
+ "timeoutSeconds": 600,
+ "maxUnavailable": "25%",
+ "maxSurge": "25%"
+ },
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "dotnet-pgsql-persistent"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "dotnet-pgsql-persistent",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "env": [
+ {
+ "name": "ConnectionString",
+ "value": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "livenessProbe": {
+ "httpGet": {
+ "path": "/",
+ "port": 8080,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 40,
+ "timeoutSeconds": 10
+ },
+ "readinessProbe": {
+ "httpGet": {
+ "path": "/",
+ "port": 8080,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 10,
+ "timeoutSeconds": 30
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "postgresql",
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "openshift",
+ "name": "postgresql:9.5"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "postgresql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 5432
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [
+ "/bin/sh",
+ "-i",
+ "-c",
+ "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"
+ ]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 5432
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/pgsql/data"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DATABASE_USER}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DATABASE_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_POSTGRESQL_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "musicstore"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "required": true,
+ "description": "Maximum amount of memory the .NET Core container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_POSTGRESQL_LIMIT",
+ "displayName": "Memory Limit (PostgreSQL)",
+ "required": true,
+ "description": "Maximum amount of memory the PostgreSQL container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "DOTNET_IMAGE_STREAM_TAG",
+ "displayName": ".NET builder",
+ "required": true,
+ "description": "The image stream tag which is used to build the code.",
+ "value": "dotnet:1.1"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "required": true,
+ "description": "The OpenShift Namespace where the .NET builder ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "description": "The URL of the repository with your application source code.",
+ "value": "https://github.com/redhat-developer/s2i-aspnet-musicstore-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "value": "rel/1.1-example"
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "DOTNET_STARTUP_PROJECT",
+ "displayName": "Startup Project",
+ "description": "Set this to the folder containing your startup project.",
+ "value": "samples/MusicStore"
+ },
+ {
+ "name": "DOTNET_ASSEMBLY_NAME",
+ "displayName": "Startup Assembly",
+ "description": "Set this when the assembly name is overridden in the project file."
+ },
+ {
+ "name": "DOTNET_NPM_TOOLS",
+ "displayName": "Npm Tools",
+ "description": "Set this to a space separated list of npm tools needed to publish."
+ },
+ {
+ "name": "DOTNET_TEST_PROJECTS",
+ "displayName": "Test projects",
+ "description": "Set this to a space separated list of test projects to run before publishing."
+ },
+ {
+ "name": "DOTNET_CONFIGURATION",
+ "displayName": "Configuration",
+ "description": "Set this to configuration (Release/Debug).",
+ "value": "Release"
+ },
+ {
+ "name": "DOTNET_RESTORE_SOURCES",
+ "displayName": "NuGet package sources",
+ "description": "Set this to override the NuGet.config sources."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the .NET Core service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "required": true,
+ "displayName": "Database Service Name",
+ "value": "postgresql"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "Database Username",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "Database Password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{8}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "required": true,
+ "displayName": "Database Name",
+ "value": "musicstore"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "displayName": "Maximum Database Connections",
+ "value": "100"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "displayName": "Shared Buffer Amount",
+ "value": "12MB"
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-ephemeral-template.json
index b0aef3cfc..264e4b2de 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-ephemeral-template.json
@@ -110,6 +110,10 @@
"value": "true"
},
{
+ "name": "OPENSHIFT_JENKINS_JVM_ARCH",
+ "value": "${JVM_ARCH}"
+ },
+ {
"name": "KUBERNETES_MASTER",
"value": "https://kubernetes.default:443"
},
@@ -255,6 +259,12 @@
"value": "true"
},
{
+ "name": "JVM_ARCH",
+ "displayName": "Jenkins JVM Architecture",
+ "description": "Whether Jenkins runs with a 32 bit (i386) or 64 bit (x86_64) JVM.",
+ "value": "i386"
+ },
+ {
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-persistent-template.json
index a542de219..b47bdf353 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-persistent-template.json
@@ -127,6 +127,10 @@
"value": "true"
},
{
+ "name": "OPENSHIFT_JENKINS_JVM_ARCH",
+ "value": "${JVM_ARCH}"
+ },
+ {
"name": "KUBERNETES_MASTER",
"value": "https://kubernetes.default:443"
},
@@ -272,6 +276,12 @@
"value": "true"
},
{
+ "name": "JVM_ARCH",
+ "displayName": "Jenkins JVM Architecture",
+ "description": "Whether Jenkins runs with a 32 bit (i386) or 64 bit (x86_64) JVM.",
+ "value": "i386"
+ },
+ {
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
diff --git a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-app-example.yaml b/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-app-example.yaml
new file mode 100644
index 000000000..14bdd1dca
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-app-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cloudforms
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /opt/nfs/volumes-app
+ server: 10.19.0.216
+ persistentVolumeReclaimPolicy: Recycle
diff --git a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-example.yaml b/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-example.yaml
new file mode 100644
index 000000000..709d8d976
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: nfs-pv01
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /opt/nfs/volumes
+ server: 10.19.0.216
+ persistentVolumeReclaimPolicy: Recycle
diff --git a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-template.yaml
new file mode 100644
index 000000000..4f25a9c8f
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-template.yaml
@@ -0,0 +1,479 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: cloudforms
+metadata:
+ name: cloudforms
+ annotations:
+ description: "CloudForms appliance with persistent storage"
+ tags: "instant-app,cloudforms,cfme"
+ iconClass: "icon-rails"
+objects:
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: "Exposes and load balances CloudForms pods"
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: ${NAME}
+ spec:
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: ${NAME}
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: ${NAME}
+ spec:
+ host: ${APPLICATION_DOMAIN}
+ port:
+ targetPort: https
+ tls:
+ termination: passthrough
+ to:
+ kind: Service
+ name: ${NAME}
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-app
+ annotations:
+ description: "Keeps track of changes in the CloudForms app image"
+ spec:
+ dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-app
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: ${DATABASE_SERVICE_NAME}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: ${DATABASE_VOLUME_CAPACITY}
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: ${NAME}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: ${APPLICATION_VOLUME_CAPACITY}
+- apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: ${NAME}
+ annotations:
+ description: "Defines how to deploy the CloudForms appliance"
+ spec:
+ template:
+ metadata:
+ labels:
+ name: ${NAME}
+ name: ${NAME}
+ spec:
+ volumes:
+ -
+ name: "cfme-app-volume"
+ persistentVolumeClaim:
+ claimName: ${NAME}
+ containers:
+ - image: cloudforms/cfme-openshift-app:${APPLICATION_IMG_TAG}
+ imagePullPolicy: IfNotPresent
+ name: cloudforms
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 80
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 80
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ volumeMounts:
+ -
+ name: "cfme-app-volume"
+ mountPath: "/persistent"
+ env:
+ -
+ name: "APPLICATION_INIT_DELAY"
+ value: "${APPLICATION_INIT_DELAY}"
+ -
+ name: "DATABASE_SERVICE_NAME"
+ value: "${DATABASE_SERVICE_NAME}"
+ -
+ name: "DATABASE_REGION"
+ value: "${DATABASE_REGION}"
+ -
+ name: "MEMCACHED_SERVICE_NAME"
+ value: "${MEMCACHED_SERVICE_NAME}"
+ -
+ name: "POSTGRESQL_USER"
+ value: "${DATABASE_USER}"
+ -
+ name: "POSTGRESQL_PASSWORD"
+ value: "${DATABASE_PASSWORD}"
+ -
+ name: "POSTGRESQL_DATABASE"
+ value: "${DATABASE_NAME}"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ resources:
+ requests:
+ memory: "${MEMORY_APPLICATION_MIN}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /opt/rh/cfme-container-scripts/sync-pv-data
+ replicas: 1
+ selector:
+ name: ${NAME}
+ triggers:
+ - type: "ConfigChange"
+ - type: "ImageChange"
+ imageChangeParams:
+ automatic: true
+ containerNames:
+ - "cloudforms"
+ from:
+ kind: "ImageStreamTag"
+ name: "cfme-openshift-app:${APPLICATION_IMG_TAG}"
+ strategy:
+ type: "Recreate"
+ recreateParams:
+ timeoutSeconds: 1200
+- apiVersion: v1
+ kind: "Service"
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: "Exposes the memcached server"
+ spec:
+ ports:
+ -
+ name: "memcached"
+ port: 11211
+ targetPort: 11211
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-memcached
+ annotations:
+ description: "Keeps track of changes in the CloudForms memcached image"
+ spec:
+ dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-memcached
+- apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: "Defines how to deploy memcached"
+ spec:
+ strategy:
+ type: "Recreate"
+ triggers:
+ -
+ type: "ImageChange"
+ imageChangeParams:
+ automatic: true
+ containerNames:
+ - "memcached"
+ from:
+ kind: "ImageStreamTag"
+ name: "cfme-openshift-memcached:${MEMCACHED_IMG_TAG}"
+ -
+ type: "ConfigChange"
+ replicas: 1
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ labels:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ spec:
+ volumes: []
+ containers:
+ -
+ name: "memcached"
+ image: "cloudforms/cfme-openshift-memcached:${MEMCACHED_IMG_TAG}"
+ ports:
+ -
+ containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ -
+ name: "MEMCACHED_MAX_MEMORY"
+ value: "${MEMCACHED_MAX_MEMORY}"
+ -
+ name: "MEMCACHED_MAX_CONNECTIONS"
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ -
+ name: "MEMCACHED_SLAB_PAGE_SIZE"
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ limits:
+ memory: "${MEMORY_MEMCACHED_LIMIT}"
+- apiVersion: v1
+ kind: "Service"
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: "Exposes the database server"
+ spec:
+ ports:
+ -
+ name: "postgresql"
+ port: 5432
+ targetPort: 5432
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-postgresql
+ annotations:
+ description: "Keeps track of changes in the CloudForms postgresql image"
+ spec:
+ dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-postgresql
+- apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: "Defines how to deploy the database"
+ spec:
+ strategy:
+ type: "Recreate"
+ triggers:
+ -
+ type: "ImageChange"
+ imageChangeParams:
+ automatic: true
+ containerNames:
+ - "postgresql"
+ from:
+ kind: "ImageStreamTag"
+ name: "cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}"
+ -
+ type: "ConfigChange"
+ replicas: 1
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ labels:
+ name: "${DATABASE_SERVICE_NAME}"
+ spec:
+ volumes:
+ -
+ name: "cfme-pgdb-volume"
+ persistentVolumeClaim:
+ claimName: ${DATABASE_SERVICE_NAME}
+ containers:
+ -
+ name: "postgresql"
+ image: "cloudforms/cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}"
+ ports:
+ -
+ containerPort: 5432
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 15
+ exec:
+ command:
+ - "/bin/sh"
+ - "-i"
+ - "-c"
+ - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 60
+ tcpSocket:
+ port: 5432
+ volumeMounts:
+ -
+ name: "cfme-pgdb-volume"
+ mountPath: "/var/lib/pgsql/data"
+ env:
+ -
+ name: "POSTGRESQL_USER"
+ value: "${DATABASE_USER}"
+ -
+ name: "POSTGRESQL_PASSWORD"
+ value: "${DATABASE_PASSWORD}"
+ -
+ name: "POSTGRESQL_DATABASE"
+ value: "${DATABASE_NAME}"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ resources:
+ limits:
+ memory: "${MEMORY_POSTGRESQL_LIMIT}"
+
+parameters:
+ -
+ name: "NAME"
+ displayName: Name
+ required: true
+ description: "The name assigned to all of the frontend objects defined in this template."
+ value: cloudforms
+ -
+ name: "DATABASE_SERVICE_NAME"
+ displayName: "PostgreSQL Service Name"
+ required: true
+ description: "The name of the OpenShift Service exposed for the PostgreSQL container."
+ value: "postgresql"
+ -
+ name: "DATABASE_USER"
+ displayName: "PostgreSQL User"
+ required: true
+ description: "PostgreSQL user that will access the database."
+ value: "root"
+ -
+ name: "DATABASE_PASSWORD"
+ displayName: "PostgreSQL Password"
+ required: true
+ description: "Password for the PostgreSQL user."
+ value: "smartvm"
+ -
+ name: "DATABASE_NAME"
+ required: true
+ displayName: "PostgreSQL Database Name"
+ description: "Name of the PostgreSQL database accessed."
+ value: "vmdb_production"
+ -
+ name: "DATABASE_REGION"
+ required: true
+ displayName: "Application Database Region"
+ description: "Database region that will be used for application."
+ value: "0"
+ -
+ name: "MEMCACHED_SERVICE_NAME"
+ required: true
+ displayName: "Memcached Service Name"
+ description: "The name of the OpenShift Service exposed for the Memcached container."
+ value: "memcached"
+ -
+ name: "MEMCACHED_MAX_MEMORY"
+ displayName: "Memcached Max Memory"
+ description: "Memcached maximum memory for memcached object storage in MB."
+ value: "64"
+ -
+ name: "MEMCACHED_MAX_CONNECTIONS"
+ displayName: "Memcached Max Connections"
+ description: "Memcached maximum number of connections allowed."
+ value: "1024"
+ -
+ name: "MEMCACHED_SLAB_PAGE_SIZE"
+ displayName: "Memcached Slab Page Size"
+ description: "Memcached size of each slab page."
+ value: "1m"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ displayName: "PostgreSQL Max Connections"
+ description: "PostgreSQL maximum number of database connections allowed."
+ value: "100"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ displayName: "PostgreSQL Shared Buffer Amount"
+ description: "Amount of memory dedicated for PostgreSQL shared memory buffers."
+ value: "64MB"
+ -
+ name: "MEMORY_APPLICATION_MIN"
+ displayName: "Application Memory Minimum"
+ required: true
+ description: "Minimum amount of memory the Application container will need."
+ value: "4096Mi"
+ -
+ name: "MEMORY_POSTGRESQL_LIMIT"
+ displayName: "PostgreSQL Memory Limit"
+ required: true
+ description: "Maximum amount of memory the PostgreSQL container can use."
+ value: "2048Mi"
+ -
+ name: "MEMORY_MEMCACHED_LIMIT"
+ displayName: "Memcached Memory Limit"
+ required: true
+ description: "Maximum amount of memory the Memcached container can use."
+ value: "256Mi"
+ -
+ name: "POSTGRESQL_IMG_TAG"
+ displayName: "PostgreSQL Image Tag"
+ description: "This is the PostgreSQL image tag/version requested to deploy."
+ value: "latest"
+ -
+ name: "MEMCACHED_IMG_TAG"
+ displayName: "Memcached Image Tag"
+ description: "This is the Memcached image tag/version requested to deploy."
+ value: "latest"
+ -
+ name: "APPLICATION_IMG_TAG"
+ displayName: "Application Image Tag"
+ description: "This is the Application image tag/version requested to deploy."
+ value: "latest"
+ -
+ name: "APPLICATION_DOMAIN"
+ displayName: "Application Hostname"
+ description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted."
+ value: ""
+ -
+ name: "APPLICATION_INIT_DELAY"
+ displayName: "Application Init Delay"
+ required: true
+ description: "Delay in seconds before we attempt to initialize the application."
+ value: "30"
+ -
+ name: "APPLICATION_VOLUME_CAPACITY"
+ displayName: "Application Volume Capacity"
+ required: true
+ description: "Volume space available for application data."
+ value: "1Gi"
+ -
+ name: "DATABASE_VOLUME_CAPACITY"
+ displayName: "Database Volume Capacity"
+ required: true
+ description: "Volume space available for database."
+ value: "1Gi"
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/README.md b/roles/openshift_examples/files/examples/v1.6/db-templates/README.md
new file mode 100644
index 000000000..a36d7ba7d
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/db-templates/README.md
@@ -0,0 +1,76 @@
+OpenShift 3 Database Examples
+=============================
+
+This directory contains example JSON templates to deploy databases in OpenShift.
+They can be used to immediately instantiate a database and expose it as a
+service in the current project, or to add a template that can be later used from
+the Web Console or the CLI.
+
+The examples can also be tweaked to create new templates.
+
+
+## Ephemeral vs. Persistent
+
+For each supported database, there are two template files.
+
+Files named `*-ephemeral-template.json` use
+"[emptyDir](https://docs.openshift.org/latest/dev_guide/volumes.html)" volumes
+for data storage, which means that data is lost after a pod restart.
+This is tolerable for experimenting, but not suitable for production use.
+
+The other templates, named `*-persistent-template.json`, use [persistent volume
+claims](https://docs.openshift.org/latest/architecture/additional_concepts/storage.html#persistent-volume-claims)
+to request persistent storage provided by [persistent
+volumes](https://docs.openshift.org/latest/architecture/additional_concepts/storage.html#persistent-volumes),
+that must have been created upfront.
+
+
+## Usage
+
+### Instantiating a new database service
+
+Use these instructions if you want to quickly deploy a new database service in
+your current project. Instantiate a new database service with this command:
+
+ $ oc new-app /path/to/template.json
+
+Replace `/path/to/template.json` with an appropriate path, that can be either a
+local path or an URL. Example:
+
+ $ oc new-app https://raw.githubusercontent.com/openshift/origin/master/examples/db-templates/mongodb-ephemeral-template.json
+
+The parameters listed in the output above can be tweaked by specifying values in
+the command line with the `-p` option:
+
+ $ oc new-app examples/db-templates/mongodb-ephemeral-template.json -p DATABASE_SERVICE_NAME=mydb -p MONGODB_USER=default
+
+Note that the persistent template requires an existing persistent volume,
+otherwise the deployment won't ever succeed.
+
+
+### Adding a database as a template
+
+Use these instructions if, instead of instantiating a service right away, you
+want to load the template into an OpenShift project so that it can be used
+later. Create the template with this command:
+
+ $ oc create -f /path/to/template.json
+
+Replace `/path/to/template.json` with an appropriate path, that can be either a
+local path or an URL. Example:
+
+ $ oc create -f https://raw.githubusercontent.com/openshift/origin/master/examples/db-templates/mongodb-ephemeral-template.json
+ template "mongodb-ephemeral" created
+
+The new template is now available to use in the Web Console or with `oc
+new-app`.
+
+
+## More information
+
+The usage of each supported database image is further documented in the links
+below:
+
+- [MySQL](https://docs.openshift.org/latest/using_images/db_images/mysql.html)
+- [PostgreSQL](https://docs.openshift.org/latest/using_images/db_images/postgresql.html)
+- [MongoDB](https://docs.openshift.org/latest/using_images/db_images/mongodb.html)
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-ephemeral-template.json
new file mode 100644
index 000000000..f347f1f9f
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-ephemeral-template.json
@@ -0,0 +1,229 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mariadb-ephemeral",
+ "annotations": {
+ "openshift.io/display-name": "MariaDB (Ephemeral)",
+ "description": "MariaDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "iconClass": "icon-mariadb",
+ "tags": "database,mariadb",
+ "template.openshift.io/long-description": "This template provides a standalone MariaDB server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.",
+ "labels": {
+ "template": "mariadb-persistent-template"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MYSQL_USER}",
+ "database-password" : "${MYSQL_PASSWORD}",
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mariadb",
+ "port": 3306
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mariadb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mariadb:10.1",
+ "namespace": "${NAMESPACE}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mariadb",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 3306
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c",
+ "MYSQL_PWD=\"$MYSQL_PASSWORD\" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 3306
+ }
+ },
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-root-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${MYSQL_DATABASE}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mysql/data"
+ }
+ ],
+ "imagePullPolicy": "IfNotPresent"
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "mariadb",
+ "required": true
+ },
+ {
+ "name": "MYSQL_USER",
+ "displayName": "MariaDB Connection Username",
+ "description": "Username for MariaDB user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "displayName": "MariaDB Connection Password",
+ "description": "Password for the MariaDB connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "displayName": "MariaDB root Password",
+ "description": "Password for the MariaDB root user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "displayName": "MariaDB Database Name",
+ "description": "Name of the MariaDB database accessed.",
+ "value": "sampledb",
+ "required": true
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-persistent-template.json
new file mode 100644
index 000000000..6ed744777
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-persistent-template.json
@@ -0,0 +1,253 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mariadb-persistent",
+ "annotations": {
+ "openshift.io/display-name": "MariaDB (Persistent)",
+ "description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
+ "iconClass": "icon-mariadb",
+ "tags": "database,mariadb",
+ "template.openshift.io/long-description": "This template provides a standalone MariaDB server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.",
+ "labels": {
+ "template": "mariadb-persistent-template"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MYSQL_USER}",
+ "database-password" : "${MYSQL_PASSWORD}",
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mariadb",
+ "port": 3306
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mariadb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mariadb:10.1",
+ "namespace": "${NAMESPACE}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mariadb",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 3306
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c",
+ "MYSQL_PWD=\"$MYSQL_PASSWORD\" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 3306
+ }
+ },
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-root-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${MYSQL_DATABASE}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mysql/data"
+ }
+ ],
+ "imagePullPolicy": "IfNotPresent"
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "mariadb",
+ "required": true
+ },
+ {
+ "name": "MYSQL_USER",
+ "displayName": "MariaDB Connection Username",
+ "description": "Username for MariaDB user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "displayName": "MariaDB Connection Password",
+ "description": "Password for the MariaDB connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "displayName": "MariaDB root Password",
+ "description": "Password for the MariaDB root user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "displayName": "MariaDB Database Name",
+ "description": "Name of the MariaDB database accessed.",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi.",
+ "value": "1Gi",
+ "required": true
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-ephemeral-template.json
new file mode 100644
index 000000000..97a8abf6d
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-ephemeral-template.json
@@ -0,0 +1,258 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mongodb-ephemeral",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "MongoDB (Ephemeral)",
+ "description": "MongoDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "iconClass": "icon-mongodb",
+ "tags": "database,mongodb",
+ "template.openshift.io/long-description": "This template provides a standalone MongoDB server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mongodb.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MONGODB_USER}\n Password: ${MONGODB_PASSWORD}\n Database Name: ${MONGODB_DATABASE}\n Connection URL: mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.",
+ "labels": {
+ "template": "mongodb-ephemeral-template"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MONGODB_USER}",
+ "database-password" : "${MONGODB_PASSWORD}",
+ "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mongo",
+ "protocol": "TCP",
+ "port": 27017,
+ "targetPort": 27017,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mongodb:${MONGODB_VERSION}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mongodb",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 3,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "mongo 127.0.0.1:27017/$MONGODB_DATABASE -u $MONGODB_USER -p $MONGODB_PASSWORD --eval=\"quit()\""]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 27017
+ }
+ },
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-admin-password"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${MONGODB_DATABASE}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mongodb/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "mongodb",
+ "required": true
+ },
+ {
+ "name": "MONGODB_USER",
+ "displayName": "MongoDB Connection Username",
+ "description": "Username for MongoDB user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "displayName": "MongoDB Connection Password",
+ "description": "Password for the MongoDB connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "displayName": "MongoDB Database Name",
+ "description": "Name of the MongoDB database accessed.",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "displayName": "MongoDB Admin Password",
+ "description": "Password for the database admin user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MONGODB_VERSION",
+ "displayName": "Version of MongoDB Image",
+ "description": "Version of MongoDB image to be used (2.4, 2.6, 3.2 or latest).",
+ "value": "3.2",
+ "required": true
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-persistent-template.json
new file mode 100644
index 000000000..0656219fb
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-persistent-template.json
@@ -0,0 +1,282 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mongodb-persistent",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "MongoDB (Persistent)",
+ "description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
+ "iconClass": "icon-mongodb",
+ "tags": "database,mongodb",
+ "template.openshift.io/long-description": "This template provides a standalone MongoDB server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mongodb.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MONGODB_USER}\n Password: ${MONGODB_PASSWORD}\n Database Name: ${MONGODB_DATABASE}\n Connection URL: mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.",
+ "labels": {
+ "template": "mongodb-persistent-template"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MONGODB_USER}",
+ "database-password" : "${MONGODB_PASSWORD}",
+ "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mongo",
+ "protocol": "TCP",
+ "port": 27017,
+ "targetPort": 27017,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mongodb:${MONGODB_VERSION}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mongodb",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 3,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "mongo 127.0.0.1:27017/$MONGODB_DATABASE -u $MONGODB_USER -p $MONGODB_PASSWORD --eval=\"quit()\""]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 27017
+ }
+ },
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-admin-password"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${MONGODB_DATABASE}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mongodb/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "mongodb",
+ "required": true
+ },
+ {
+ "name": "MONGODB_USER",
+ "displayName": "MongoDB Connection Username",
+ "description": "Username for MongoDB user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "displayName": "MongoDB Connection Password",
+ "description": "Password for the MongoDB connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "displayName": "MongoDB Database Name",
+ "description": "Name of the MongoDB database accessed.",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "displayName": "MongoDB Admin Password",
+ "description": "Password for the database admin user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi.",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "MONGODB_VERSION",
+ "displayName": "Version of MongoDB Image",
+ "description": "Version of MongoDB image to be used (2.4, 2.6, 3.2 or latest).",
+ "value": "3.2",
+ "required": true
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-ephemeral-template.json
new file mode 100644
index 000000000..d60b4647d
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-ephemeral-template.json
@@ -0,0 +1,258 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mysql-ephemeral",
+ "annotations": {
+ "openshift.io/display-name": "MySQL (Ephemeral)",
+ "description": "MySQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "iconClass": "icon-mysql-database",
+ "tags": "database,mysql",
+ "template.openshift.io/long-description": "This template provides a standalone MySQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mysql.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.",
+ "labels": {
+ "template": "mysql-ephemeral-template"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MYSQL_USER}",
+ "database-password" : "${MYSQL_PASSWORD}",
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mysql",
+ "protocol": "TCP",
+ "port": 3306,
+ "targetPort": 3306,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mysql:${MYSQL_VERSION}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mysql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c",
+ "MYSQL_PWD=\"$MYSQL_PASSWORD\" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 3306
+ }
+ },
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-root-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${MYSQL_DATABASE}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mysql/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "mysql",
+ "required": true
+ },
+ {
+ "name": "MYSQL_USER",
+ "displayName": "MySQL Connection Username",
+ "description": "Username for MySQL user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "displayName": "MySQL Connection Password",
+ "description": "Password for the MySQL connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "displayName": "MySQL root user Password",
+ "description": "Password for the MySQL root user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "displayName": "MySQL Database Name",
+ "description": "Name of the MySQL database accessed.",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "MYSQL_VERSION",
+ "displayName": "Version of MySQL Image",
+ "description": "Version of MySQL image to be used (5.5, 5.6, 5.7, or latest).",
+ "value": "5.7",
+ "required": true
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-persistent-template.json
new file mode 100644
index 000000000..c2bfa40fd
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-persistent-template.json
@@ -0,0 +1,260 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mysql-persistent",
+ "annotations": {
+ "openshift.io/display-name": "MySQL (Persistent)",
+ "description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
+ "iconClass": "icon-mysql-database",
+ "tags": "database,mysql",
+ "template.openshift.io/long-description": "This template provides a standalone MySQL server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mysql.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.",
+ "labels": {
+ "template": "mysql-persistent-template"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MYSQL_USER}",
+ "database-password" : "${MYSQL_PASSWORD}",
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mysql",
+ "port": 3306
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mysql:${MYSQL_VERSION}",
+ "namespace": "${NAMESPACE}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mysql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 3306
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c",
+ "MYSQL_PWD=\"$MYSQL_PASSWORD\" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 3306
+ }
+ },
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-root-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${MYSQL_DATABASE}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mysql/data"
+ }
+ ],
+ "imagePullPolicy": "IfNotPresent"
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "mysql",
+ "required": true
+ },
+ {
+ "name": "MYSQL_USER",
+ "displayName": "MySQL Connection Username",
+ "description": "Username for MySQL user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "displayName": "MySQL Connection Password",
+ "description": "Password for the MySQL connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "displayName": "MySQL root user Password",
+ "description": "Password for the MySQL root user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "displayName": "MySQL Database Name",
+ "description": "Name of the MySQL database accessed.",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi.",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "MYSQL_VERSION",
+ "displayName": "Version of MySQL Image",
+ "description": "Version of MySQL image to be used (5.5, 5.6, 5.7, or latest).",
+ "value": "5.7",
+ "required": true
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-ephemeral-template.json
new file mode 100644
index 000000000..7a16e742a
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-ephemeral-template.json
@@ -0,0 +1,240 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "postgresql-ephemeral",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL (Ephemeral)",
+ "description": "PostgreSQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "iconClass": "icon-postgresql",
+ "tags": "database,postgresql",
+ "template.openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/postgresql.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.",
+ "labels": {
+ "template": "postgresql-ephemeral-template"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${POSTGRESQL_USER}",
+ "database-password" : "${POSTGRESQL_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "postgresql",
+ "protocol": "TCP",
+ "port": 5432,
+ "targetPort": 5432,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "postgresql:${POSTGRESQL_VERSION}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "postgresql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "psql -h 127.0.0.1 -U $POSTGRESQL_USER -q -d $POSTGRESQL_DATABASE -c 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 5432
+ }
+ },
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${POSTGRESQL_DATABASE}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/pgsql/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "postgresql",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_USER",
+ "displayName": "PostgreSQL Connection Username",
+ "description": "Username for PostgreSQL user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "displayName": "PostgreSQL Connection Password",
+ "description": "Password for the PostgreSQL connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "displayName": "PostgreSQL Database Name",
+ "description": "Name of the PostgreSQL database accessed.",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_VERSION",
+ "displayName": "Version of PostgreSQL Image",
+ "description": "Version of PostgreSQL image to be used (9.2, 9.4, 9.5 or latest).",
+ "value": "9.5",
+ "required": true
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-persistent-template.json
new file mode 100644
index 000000000..242212d6f
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-persistent-template.json
@@ -0,0 +1,264 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "postgresql-persistent",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL (Persistent)",
+ "description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
+ "iconClass": "icon-postgresql",
+ "tags": "database,postgresql",
+ "template.openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/postgresql.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.",
+ "labels": {
+ "template": "postgresql-persistent-template"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${POSTGRESQL_USER}",
+ "database-password" : "${POSTGRESQL_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "postgresql",
+ "protocol": "TCP",
+ "port": 5432,
+ "targetPort": 5432,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "postgresql:${POSTGRESQL_VERSION}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "postgresql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "psql -h 127.0.0.1 -U $POSTGRESQL_USER -q -d $POSTGRESQL_DATABASE -c 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 5432
+ }
+ },
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${POSTGRESQL_DATABASE}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/pgsql/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "postgresql",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_USER",
+ "displayName": "PostgreSQL Connection Username",
+ "description": "Username for PostgreSQL user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "displayName": "PostgreSQL Connection Password",
+ "description": "Password for the PostgreSQL connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "displayName": "PostgreSQL Database Name",
+ "description": "Name of the PostgreSQL database accessed.",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi.",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_VERSION",
+ "displayName": "Version of PostgreSQL Image",
+ "description": "Version of PostgreSQL image to be used (9.2, 9.4, 9.5 or latest).",
+ "value": "9.5",
+ "required": true
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.6/db-templates/redis-ephemeral-template.json
new file mode 100644
index 000000000..e9af50937
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/db-templates/redis-ephemeral-template.json
@@ -0,0 +1,211 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redis-ephemeral",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "Redis (Ephemeral)",
+ "description": "Redis in-memory data structure store, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "iconClass": "icon-redis",
+ "tags": "database,redis",
+ "template.openshift.io/long-description": "This template provides a standalone Redis server. The data is not stored on persistent storage, so any restart of the service will result in all data being lost.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/sclorg/redis-container/tree/master/3.2",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Password: ${REDIS_PASSWORD}\n Connection URL: redis://${DATABASE_SERVICE_NAME}:6379/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.",
+ "labels": {
+ "template": "redis-ephemeral-template"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-password" : "${REDIS_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "redis",
+ "protocol": "TCP",
+ "port": 6379,
+ "targetPort": 6379,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "redis"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "redis:${REDIS_VERSION}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "redis",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 6379,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "test \"$(redis-cli -h 127.0.0.1 -a $REDIS_PASSWORD ping)\" == \"PONG\""]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 6379
+ }
+ },
+ "env": [
+ {
+ "name": "REDIS_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/redis/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "redis",
+ "required": true
+ },
+ {
+ "name": "REDIS_PASSWORD",
+ "displayName": "Redis Connection Password",
+ "description": "Password for the Redis connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "REDIS_VERSION",
+ "displayName": "Version of Redis Image",
+ "description": "Version of Redis image to be used (3.2 or latest).",
+ "value": "3.2",
+ "required": true
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v1.6/db-templates/redis-persistent-template.json
new file mode 100644
index 000000000..aa27578a9
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/db-templates/redis-persistent-template.json
@@ -0,0 +1,235 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redis-persistent",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "Redis (Persistent)",
+ "description": "Redis in-memory data structure store, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
+ "iconClass": "icon-redis",
+ "tags": "database,redis",
+ "template.openshift.io/long-description": "This template provides a standalone Redis server. The data is stored on persistent storage.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/sclorg/redis-container/tree/master/3.2",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Password: ${REDIS_PASSWORD}\n Connection URL: redis://${DATABASE_SERVICE_NAME}:6379/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.",
+ "labels": {
+ "template": "redis-persistent-template"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-password" : "${REDIS_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "redis",
+ "protocol": "TCP",
+ "port": 6379,
+ "targetPort": 6379,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "redis"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "redis:${REDIS_VERSION}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "redis",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 6379,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "test \"$(redis-cli -h 127.0.0.1 -a $REDIS_PASSWORD ping)\" == \"PONG\""]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 6379
+ }
+ },
+ "env": [
+ {
+ "name": "REDIS_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/redis/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "redis",
+ "required": true
+ },
+ {
+ "name": "REDIS_PASSWORD",
+ "displayName": "Redis Connection Password",
+ "description": "Password for the Redis connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi.",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "REDIS_VERSION",
+ "displayName": "Version of Redis Image",
+ "description": "Version of Redis image to be used (3.2 or latest).",
+ "value": "3.2",
+ "required": true
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/image-streams/dotnet_imagestreams.json b/roles/openshift_examples/files/examples/v1.6/image-streams/dotnet_imagestreams.json
new file mode 100644
index 000000000..857ffa980
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/image-streams/dotnet_imagestreams.json
@@ -0,0 +1,79 @@
+{
+ "kind": "ImageStreamList",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dotnet-image-streams",
+ "annotations": {
+ "description": "ImageStream definitions for .NET Core on RHEL"
+ }
+ },
+ "items": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dotnet",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core (Latest)",
+ "description": "Build and run .NET Core applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of .NET Core available on OpenShift, including major versions updates.",
+ "iconClass": "icon-dotnet",
+ "tags": "builder,.net,dotnet,dotnetcore",
+ "supports":"dotnet",
+ "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git",
+ "sampleContextDir": "app",
+ "sampleRef": "dotnetcore-1.1"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "1.1"
+ }
+ },
+ {
+ "name": "1.1",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core 1.1",
+ "description": "Build and run .NET Core 1.1 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.1/README.md.",
+ "iconClass": "icon-dotnet",
+ "tags": "builder,.net,dotnet,dotnetcore,rh-dotnetcore11",
+ "supports":"dotnet:1.1,dotnet",
+ "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git",
+ "sampleContextDir": "app",
+ "sampleRef": "dotnetcore-1.1",
+ "version": "1.1"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/dotnet/dotnetcore-11-rhel7:1.1"
+ }
+ },
+ {
+ "name": "1.0",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core 1.0",
+ "description": "Build and run .NET Core 1.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.0/README.md.",
+ "iconClass": "icon-dotnet",
+ "tags": "builder,.net,dotnet,dotnetcore,rh-dotnetcore10",
+ "supports":"dotnet:1.0,dotnet",
+ "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git",
+ "sampleContextDir": "app",
+ "sampleRef": "dotnetcore-1.0",
+ "version": "1.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/dotnet/dotnetcore-10-rhel7:1.0"
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-centos7.json
new file mode 100644
index 000000000..1a90a9409
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-centos7.json
@@ -0,0 +1,829 @@
+{
+ "kind": "ImageStreamList",
+ "apiVersion": "v1",
+ "metadata": {},
+ "items": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "ruby",
+ "annotations": {
+ "openshift.io/display-name": "Ruby"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Ruby (Latest)",
+ "description": "Build and run Ruby applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.3/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "2.3"
+ }
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "openshift.io/display-name": "Ruby 2.0",
+ "description": "Build and run Ruby 2.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.0/README.md.",
+ "iconClass": "icon-ruby",
+ "tags": "hidden,builder,ruby",
+ "supports": "ruby:2.0,ruby",
+ "version": "2.0",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/ruby-20-centos7:latest"
+ }
+ },
+ {
+ "name": "2.2",
+ "annotations": {
+ "openshift.io/display-name": "Ruby 2.2",
+ "description": "Build and run Ruby 2.2 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.2/README.md.",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby:2.2,ruby",
+ "version": "2.2",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/ruby-22-centos7:latest"
+ }
+ },
+ {
+ "name": "2.3",
+ "annotations": {
+ "openshift.io/display-name": "Ruby 2.3",
+ "description": "Build and run Ruby 2.3 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/blob/master/2.3/README.md.",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby:2.3,ruby",
+ "version": "2.3",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/ruby-23-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs",
+ "annotations": {
+ "openshift.io/display-name": "Node.js"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Node.js (Latest)",
+ "description": "Build and run Node.js applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "4"
+ }
+ },
+ {
+ "name": "0.10",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 0.10",
+ "description": "DEPRECATED: Build and run Node.js 0.10 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/0.10/README.md.",
+ "iconClass": "icon-nodejs",
+ "tags": "hidden,nodejs",
+ "supports":"nodejs:0.10,nodejs:0.1,nodejs",
+ "version": "0.10",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/nodejs-010-centos7:latest"
+ }
+ },
+ {
+ "name": "4",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 4",
+ "description": "Build and run Node.js 4 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:4,nodejs",
+ "version": "4",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/nodejs-4-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "perl",
+ "annotations": {
+ "openshift.io/display-name": "Perl"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Perl (Latest)",
+ "description": "Build and run Perl applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Perl available on OpenShift, including major versions updates.",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "5.24"
+ }
+ },
+ {
+ "name": "5.16",
+ "annotations": {
+ "openshift.io/display-name": "Perl 5.16",
+ "description": "Build and run Perl 5.16 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.16/README.md.",
+ "iconClass": "icon-perl",
+ "tags": "hidden,builder,perl",
+ "supports":"perl:5.16,perl",
+ "version": "5.16",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/perl-516-centos7:latest"
+ }
+ },
+ {
+ "name": "5.20",
+ "annotations": {
+ "openshift.io/display-name": "Perl 5.20",
+ "description": "Build and run Perl 5.20 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.20,perl",
+ "version": "5.20",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/perl-520-centos7:latest"
+ }
+ },
+ {
+ "name": "5.24",
+ "annotations": {
+ "openshift.io/display-name": "Perl 5.24",
+ "description": "Build and run Perl 5.24 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.24/README.md.",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.24,perl",
+ "version": "5.24",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/perl-524-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "php",
+ "annotations": {
+ "openshift.io/display-name": "PHP"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "PHP (Latest)",
+ "description": "Build and run PHP applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "7.0"
+ }
+ },
+ {
+ "name": "5.5",
+ "annotations": {
+ "openshift.io/display-name": "PHP 5.5",
+ "description": "Build and run PHP 5.5 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.5/README.md.",
+ "iconClass": "icon-php",
+ "tags": "hidden,builder,php",
+ "supports":"php:5.5,php",
+ "version": "5.5",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/php-55-centos7:latest"
+ }
+ },
+ {
+ "name": "5.6",
+ "annotations": {
+ "openshift.io/display-name": "PHP 5.6",
+ "description": "Build and run PHP 5.6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:5.6,php",
+ "version": "5.6",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/php-56-centos7:latest"
+ }
+ },
+ {
+ "name": "7.0",
+ "annotations": {
+ "openshift.io/display-name": "PHP 7.0",
+ "description": "Build and run PHP 7.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.0/README.md.",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:7.0,php",
+ "version": "7.0",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/php-70-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "python",
+ "annotations": {
+ "openshift.io/display-name": "Python"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Python (Latest)",
+ "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "3.5"
+ }
+ },
+ {
+ "name": "3.3",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.3",
+ "description": "Build and run Python 3.3 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.3/README.md.",
+ "iconClass": "icon-python",
+ "tags": "hidden,builder,python",
+ "supports":"python:3.3,python",
+ "version": "3.3",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/python-33-centos7:latest"
+ }
+ },
+ {
+ "name": "2.7",
+ "annotations": {
+ "openshift.io/display-name": "Python 2.7",
+ "description": "Build and run Python 2.7 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/2.7/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:2.7,python",
+ "version": "2.7",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/python-27-centos7:latest"
+ }
+ },
+ {
+ "name": "3.4",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.4",
+ "description": "Build and run Python 3.4 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.4/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.4,python",
+ "version": "3.4",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/python-34-centos7:latest"
+ }
+ },
+ {
+ "name": "3.5",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.5",
+ "description": "Build and run Python 3.5 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.5,python",
+ "version": "3.5",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/python-35-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "wildfly",
+ "annotations": {
+ "openshift.io/display-name": "WildFly"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "WildFly (Latest)",
+ "description": "Build and run WildFly applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of WildFly available on OpenShift, including major versions updates.",
+ "iconClass": "icon-wildfly",
+ "tags": "builder,wildfly,java",
+ "supports":"jee,java",
+ "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "10.1"
+ }
+ },
+ {
+ "name": "8.1",
+ "annotations": {
+ "openshift.io/display-name": "WildFly 8.1",
+ "description": "Build and run WildFly 8.1 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
+ "iconClass": "icon-wildfly",
+ "tags": "builder,wildfly,java",
+ "supports":"wildfly:8.1,jee,java",
+ "version": "8.1",
+ "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/wildfly-81-centos7:latest"
+ }
+ },
+ {
+ "name": "9.0",
+ "annotations": {
+ "openshift.io/display-name": "WildFly 9.0",
+ "description": "Build and run WildFly 9.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
+ "iconClass": "icon-wildfly",
+ "tags": "builder,wildfly,java",
+ "supports":"wildfly:9.0,jee,java",
+ "version": "9.0",
+ "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/wildfly-90-centos7:latest"
+ }
+ },
+ {
+ "name": "10.0",
+ "annotations": {
+ "openshift.io/display-name": "WildFly 10.0",
+ "description": "Build and run WildFly 10.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
+ "iconClass": "icon-wildfly",
+ "tags": "builder,wildfly,java",
+ "supports":"wildfly:10.0,jee,java",
+ "version": "10.0",
+ "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/wildfly-100-centos7:latest"
+ }
+ },
+ {
+ "name": "10.1",
+ "annotations": {
+ "openshift.io/display-name": "WildFly 10.1",
+ "description": "Build and run WildFly 10.1 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
+ "iconClass": "icon-wildfly",
+ "tags": "builder,wildfly,java",
+ "supports":"wildfly:10.1,jee,java",
+ "version": "10.1",
+ "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/wildfly-101-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mysql",
+ "annotations": {
+ "openshift.io/display-name": "MySQL"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "MySQL (Latest)",
+ "description": "Provides a MySQL database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MySQL available on OpenShift, including major versions updates.",
+ "iconClass": "icon-mysql-database",
+ "tags": "mysql"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "5.7"
+ }
+ },
+ {
+ "name": "5.5",
+ "annotations": {
+ "openshift.io/display-name": "MySQL 5.5",
+ "description": "Provides a MySQL 5.5 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.5/README.md.",
+ "iconClass": "icon-mysql-database",
+ "tags": "hidden,mysql",
+ "version": "5.5"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/mysql-55-centos7:latest"
+ }
+ },
+ {
+ "name": "5.6",
+ "annotations": {
+ "openshift.io/display-name": "MySQL 5.6",
+ "description": "Provides a MySQL 5.6 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.",
+ "iconClass": "icon-mysql-database",
+ "tags": "mysql",
+ "version": "5.6"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/mysql-56-centos7:latest"
+ }
+ },
+ {
+ "name": "5.7",
+ "annotations": {
+ "openshift.io/display-name": "MySQL 5.7",
+ "description": "Provides a MySQL 5.7 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.7/README.md.",
+ "iconClass": "icon-mysql-database",
+ "tags": "mysql",
+ "version": "5.7"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/mysql-57-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mariadb",
+ "annotations": {
+ "openshift.io/display-name": "MariaDB"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "MariaDB (Latest)",
+ "description": "Provides a MariaDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.",
+ "iconClass": "icon-mariadb",
+ "tags": "mariadb"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "10.1"
+ }
+ },
+ {
+ "name": "10.1",
+ "annotations": {
+ "openshift.io/display-name": "MariaDB 10.1",
+ "description": "Provides a MariaDB 10.1 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.",
+ "iconClass": "icon-mariadb",
+ "tags": "mariadb",
+ "version": "10.1"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/mariadb-101-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "postgresql",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL (Latest)",
+ "description": "Provides a PostgreSQL database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.",
+ "iconClass": "icon-postgresql",
+ "tags": "postgresql"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "9.5"
+ }
+ },
+ {
+ "name": "9.2",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL 9.2",
+ "description": "Provides a PostgreSQL 9.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.",
+ "iconClass": "icon-postgresql",
+ "tags": "hidden,postgresql",
+ "version": "9.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/postgresql-92-centos7:latest"
+ }
+ },
+ {
+ "name": "9.4",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL 9.4",
+ "description": "Provides a PostgreSQL 9.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4.",
+ "iconClass": "icon-postgresql",
+ "tags": "postgresql",
+ "version": "9.4"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/postgresql-94-centos7:latest"
+ }
+ },
+ {
+ "name": "9.5",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL 9.5",
+ "description": "Provides a PostgreSQL 9.5 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.",
+ "iconClass": "icon-postgresql",
+ "tags": "postgresql",
+ "version": "9.5"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/postgresql-95-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mongodb",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB (Latest)",
+ "description": "Provides a MongoDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.",
+ "iconClass": "icon-mongodb",
+ "tags": "mongodb"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "3.2"
+ }
+ },
+ {
+ "name": "2.4",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB 2.4",
+ "description": "Provides a MongoDB 2.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.4/README.md.",
+ "iconClass": "icon-mongodb",
+ "tags": "hidden,mongodb",
+ "version": "2.4"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/mongodb-24-centos7:latest"
+ }
+ },
+ {
+ "name": "2.6",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB 2.6",
+ "description": "Provides a MongoDB 2.6 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.6/README.md.",
+ "iconClass": "icon-mongodb",
+ "tags": "mongodb",
+ "version": "2.6"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/mongodb-26-centos7:latest"
+ }
+ },
+ {
+ "name": "3.2",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB 3.2",
+ "description": "Provides a MongoDB 3.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.",
+ "iconClass": "icon-mongodb",
+ "tags": "mongodb",
+ "version": "3.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/mongodb-32-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redis",
+ "annotations": {
+ "openshift.io/display-name": "Redis"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Redis (Latest)",
+ "description": "Provides a Redis database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Redis available on OpenShift, including major versions updates.",
+ "iconClass": "icon-redis",
+ "tags": "redis"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "3.2"
+ }
+ },
+ {
+ "name": "3.2",
+ "annotations": {
+ "openshift.io/display-name": "Redis 3.2",
+ "description": "Provides a Redis 3.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.",
+ "iconClass": "icon-redis",
+ "tags": "redis",
+ "version": "3.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/redis-32-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jenkins",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins (Latest)",
+ "description": "Provides a Jenkins server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Jenkins available on OpenShift, including major versions updates.",
+ "iconClass": "icon-jenkins",
+ "tags": "jenkins"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "2"
+ }
+ },
+ {
+ "name": "1",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins 1.X",
+ "description": "Provides a Jenkins 1.X server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
+ "iconClass": "icon-jenkins",
+ "tags": "jenkins",
+ "version": "1.x"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/jenkins-1-centos7:latest"
+ }
+ },
+ {
+ "name": "2",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins 2.X",
+ "description": "Provides a Jenkins v2.x server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
+ "iconClass": "icon-jenkins",
+ "tags": "jenkins",
+ "version": "2.x"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/jenkins-2-centos7:latest"
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-rhel7.json
new file mode 100644
index 000000000..eb94c3bb4
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-rhel7.json
@@ -0,0 +1,736 @@
+{
+ "kind": "ImageStreamList",
+ "apiVersion": "v1",
+ "metadata": {},
+ "items": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "ruby",
+ "annotations": {
+ "openshift.io/display-name": "Ruby"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Ruby (Latest)",
+ "description": "Build and run Ruby applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.3/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "2.3"
+ }
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "openshift.io/display-name": "Ruby 2.0",
+ "description": "Build and run Ruby 2.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.0/README.md.",
+ "iconClass": "icon-ruby",
+ "tags": "hidden,builder,ruby",
+ "supports": "ruby:2.0,ruby",
+ "version": "2.0",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/ruby-20-rhel7:latest"
+ }
+ },
+ {
+ "name": "2.2",
+ "annotations": {
+ "openshift.io/display-name": "Ruby 2.2",
+ "description": "Build and run Ruby 2.2 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.2/README.md.",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby:2.2,ruby",
+ "version": "2.2",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/ruby-22-rhel7:latest"
+ }
+ },
+ {
+ "name": "2.3",
+ "annotations": {
+ "openshift.io/display-name": "Ruby 2.3",
+ "description": "Build and run Ruby 2.3 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/blob/master/2.3/README.md.",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby:2.3,ruby",
+ "version": "2.3",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/ruby-23-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs",
+ "annotations": {
+ "openshift.io/display-name": "Node.js"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Node.js (Latest)",
+ "description": "Build and run Node.js applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "4"
+ }
+ },
+ {
+ "name": "0.10",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 0.10",
+ "description": "DEPRECATED: Build and run Node.js 0.10 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/0.10/README.md.",
+ "iconClass": "icon-nodejs",
+ "tags": "hidden,nodejs",
+ "supports":"nodejs:0.10,nodejs:0.1,nodejs",
+ "version": "0.10",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/nodejs-010-rhel7:latest"
+ }
+ },
+ {
+ "name": "4",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 4",
+ "description": "Build and run Node.js 4 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:4,nodejs",
+ "version": "4",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/nodejs-4-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "perl",
+ "annotations": {
+ "openshift.io/display-name": "Perl"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Perl (Latest)",
+ "description": "Build and run Perl applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Perl available on OpenShift, including major versions updates.",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "5.24"
+ }
+ },
+ {
+ "name": "5.16",
+ "annotations": {
+ "openshift.io/display-name": "Perl 5.16",
+ "description": "Build and run Perl 5.16 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.16/README.md.",
+ "iconClass": "icon-perl",
+ "tags": "hidden,builder,perl",
+ "supports":"perl:5.16,perl",
+ "version": "5.16",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/perl-516-rhel7:latest"
+ }
+ },
+ {
+ "name": "5.20",
+ "annotations": {
+ "openshift.io/display-name": "Perl 5.20",
+ "description": "Build and run Perl 5.20 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.20,perl",
+ "version": "5.20",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/perl-520-rhel7:latest"
+ }
+ },
+ {
+ "name": "5.24",
+ "annotations": {
+ "openshift.io/display-name": "Perl 5.24",
+ "description": "Build and run Perl 5.24 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.24/README.md.",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.24,perl",
+ "version": "5.24",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/perl-524-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "php",
+ "annotations": {
+ "openshift.io/display-name": "PHP"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "PHP (Latest)",
+ "description": "Build and run PHP applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "7.0"
+ }
+ },
+ {
+ "name": "5.5",
+ "annotations": {
+ "openshift.io/display-name": "PHP 5.5",
+ "description": "Build and run PHP 5.5 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.5/README.md.",
+ "iconClass": "icon-php",
+ "tags": "hidden,builder,php",
+ "supports":"php:5.5,php",
+ "version": "5.5",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/php-55-rhel7:latest"
+ }
+ },
+ {
+ "name": "5.6",
+ "annotations": {
+ "openshift.io/display-name": "PHP 5.6",
+ "description": "Build and run PHP 5.6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:5.6,php",
+ "version": "5.6",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/php-56-rhel7:latest"
+ }
+ },
+ {
+ "name": "7.0",
+ "annotations": {
+ "openshift.io/display-name": "PHP 7.0",
+ "description": "Build and run PHP 7.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.0/README.md.",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:7.0,php",
+ "version": "7.0",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/php-70-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "python",
+ "annotations": {
+ "openshift.io/display-name": "Python"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Python (Latest)",
+ "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "3.5"
+ }
+ },
+ {
+ "name": "3.3",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.3",
+ "description": "Build and run Python 3.3 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.3/README.md.",
+ "iconClass": "icon-python",
+ "tags": "hidden,builder,python",
+ "supports":"python:3.3,python",
+ "version": "3.3",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/python-33-rhel7:latest"
+ }
+ },
+ {
+ "name": "2.7",
+ "annotations": {
+ "openshift.io/display-name": "Python 2.7",
+ "description": "Build and run Python 2.7 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/2.7/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:2.7,python",
+ "version": "2.7",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/python-27-rhel7:latest"
+ }
+ },
+ {
+ "name": "3.4",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.4",
+ "description": "Build and run Python 3.4 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.4/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.4,python",
+ "version": "3.4",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/python-34-rhel7:latest"
+ }
+ },
+ {
+ "name": "3.5",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.5",
+ "description": "Build and run Python 3.5 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.5,python",
+ "version": "3.5",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/python-35-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mysql",
+ "annotations": {
+ "openshift.io/display-name": "MySQL"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "MySQL (Latest)",
+ "description": "Provides a MySQL database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MySQL available on OpenShift, including major versions updates.",
+ "iconClass": "icon-mysql-database",
+ "tags": "mysql"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "5.7"
+ }
+ },
+ {
+ "name": "5.5",
+ "annotations": {
+ "openshift.io/display-name": "MySQL 5.5",
+ "description": "Provides a MySQL 5.5 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.5/README.md.",
+ "iconClass": "icon-mysql-database",
+ "tags": "hidden,mysql",
+ "version": "5.5"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/mysql-55-rhel7:latest"
+ }
+ },
+ {
+ "name": "5.6",
+ "annotations": {
+ "openshift.io/display-name": "MySQL 5.6",
+ "description": "Provides a MySQL 5.6 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.",
+ "iconClass": "icon-mysql-database",
+ "tags": "mysql",
+ "version": "5.6"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/mysql-56-rhel7:latest"
+ }
+ },
+ {
+ "name": "5.7",
+ "annotations": {
+ "openshift.io/display-name": "MySQL 5.7",
+ "description": "Provides a MySQL 5.7 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.7/README.md.",
+ "iconClass": "icon-mysql-database",
+ "tags": "mysql",
+ "version": "5.7"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/mysql-57-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mariadb",
+ "annotations": {
+ "openshift.io/display-name": "MariaDB"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "MariaDB (Latest)",
+ "description": "Provides a MariaDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.",
+ "iconClass": "icon-mariadb",
+ "tags": "mariadb"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "10.1"
+ }
+ },
+ {
+ "name": "10.1",
+ "annotations": {
+ "openshift.io/display-name": "MariaDB 10.1",
+ "description": "Provides a MariaDB 10.1 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.",
+ "iconClass": "icon-mariadb",
+ "tags": "mariadb",
+ "version": "10.1"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/mariadb-101-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "postgresql",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL (Latest)",
+ "description": "Provides a PostgreSQL database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.",
+ "iconClass": "icon-postgresql",
+ "tags": "postgresql"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "9.5"
+ }
+ },
+ {
+ "name": "9.2",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL 9.2",
+ "description": "Provides a PostgreSQL 9.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.",
+ "iconClass": "icon-postgresql",
+ "tags": "hidden,postgresql",
+ "version": "9.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/postgresql-92-rhel7:latest"
+ }
+ },
+ {
+ "name": "9.4",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL 9.4",
+ "description": "Provides a PostgreSQL 9.4 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4.",
+ "iconClass": "icon-postgresql",
+ "tags": "postgresql",
+ "version": "9.4"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/postgresql-94-rhel7:latest"
+ }
+ },
+ {
+ "name": "9.5",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL 9.5",
+ "description": "Provides a PostgreSQL 9.5 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.",
+ "iconClass": "icon-postgresql",
+ "tags": "postgresql",
+ "version": "9.5"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/postgresql-95-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mongodb",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB (Latest)",
+ "description": "Provides a MongoDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.",
+ "iconClass": "icon-mongodb",
+ "tags": "mongodb"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "3.2"
+ }
+ },
+ {
+ "name": "2.4",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB 2.4",
+ "description": "Provides a MongoDB 2.4 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.4/README.md.",
+ "iconClass": "icon-mongodb",
+ "tags": "hidden,mongodb",
+ "version": "2.4"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/mongodb-24-rhel7:latest"
+ }
+ },
+ {
+ "name": "2.6",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB 2.6",
+ "description": "Provides a MongoDB 2.6 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.6/README.md.",
+ "iconClass": "icon-mongodb",
+ "tags": "mongodb",
+ "version": "2.6"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/mongodb-26-rhel7:latest"
+ }
+ },
+ {
+ "name": "3.2",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB 3.2",
+ "description": "Provides a MongoDB 3.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.",
+ "iconClass": "icon-mongodb",
+ "tags": "mongodb",
+ "version": "3.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/mongodb-32-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redis",
+ "annotations": {
+ "openshift.io/display-name": "Redis"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Redis (Latest)",
+ "description": "Provides a Redis database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Redis available on OpenShift, including major versions updates.",
+ "iconClass": "icon-redis",
+ "tags": "redis"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "3.2"
+ }
+ },
+ {
+ "name": "3.2",
+ "annotations": {
+ "openshift.io/display-name": "Redis 3.2",
+ "description": "Provides a Redis 3.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.",
+ "iconClass": "icon-redis",
+ "tags": "redis",
+ "version": "3.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/redis-32-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jenkins",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins (Latest)",
+ "description": "Provides a Jenkins server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Jenkins available on OpenShift, including major versions updates.",
+ "iconClass": "icon-jenkins",
+ "tags": "jenkins"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "2"
+ }
+ },
+ {
+ "name": "1",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins 1.X",
+ "description": "Provides a Jenkins 1.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
+ "iconClass": "icon-jenkins",
+ "tags": "jenkins",
+ "version": "1.x"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/jenkins-1-rhel7:latest"
+ }
+ },
+ {
+ "name": "2",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins 2.X",
+ "description": "Provides a Jenkins 2.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
+ "iconClass": "icon-jenkins",
+ "tags": "jenkins",
+ "version": "2.x"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/jenkins-2-rhel7:latest"
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/README.md b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/README.md
new file mode 100644
index 000000000..f48d8d4a8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/README.md
@@ -0,0 +1,27 @@
+QuickStarts
+===========
+
+QuickStarts provide the basic skeleton of an application. Generally they
+reference a repository containing very simple source code that implements a
+trivial application using a particular framework. In addition they define any
+components needed for the application including a Build configuration,
+supporting services such as Databases, etc.
+
+You can instantiate these templates as is, or fork the source repository they
+reference and supply your forked repository as the source-repository when
+instantiating them.
+
+* [CakePHP](https://raw.githubusercontent.com/openshift/cakephp-ex/master/openshift/templates/cakephp-mysql.json) - Provides a basic CakePHP application with a MySQL database. For more information see the [source repository](https://github.com/openshift/cakephp-ex).
+* [CakePHP persistent](https://raw.githubusercontent.com/openshift/cakephp-ex/master/openshift/templates/cakephp-mysql-persistent.json) - Provides a basic CakePHP application with a persistent MySQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/cakephp-ex).
+* [Dancer](https://raw.githubusercontent.com/openshift/dancer-ex/master/openshift/templates/dancer-mysql.json) - Provides a basic Dancer (Perl) application with a MySQL database. For more information see the [source repository](https://github.com/openshift/dancer-ex).
+* [Dancer persistent](https://raw.githubusercontent.com/openshift/dancer-ex/master/openshift/templates/dancer-mysql-persistent.json) - Provides a basic Dancer (Perl) application with a persistent MySQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/dancer-ex).
+* [Django](https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql.json) - Provides a basic Django (Python) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/django-ex).
+* [Django persistent](https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql-persistent.json) - Provides a basic Django (Python) application with a persistent PostgreSQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/django-ex).
+* [NodeJS](https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb.json) - Provides a basic NodeJS application with a MongoDB database. For more information see the [source repository](https://github.com/openshift/nodejs-ex).
+* [NodeJS persistent](https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb-persistent.json) - Provides a basic NodeJS application with a persistent MongoDB database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/nodejs-ex).
+* [Rails](https://raw.githubusercontent.com/openshift/rails-ex/master/openshift/templates/rails-postgresql.json) - Provides a basic Rails (Ruby) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/rails-ex).
+* [Rails persistent](https://raw.githubusercontent.com/openshift/rails-ex/master/openshift/templates/rails-postgresql-persistent.json) - Provides a basic Rails (Ruby) application with a persistent PostgreSQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/rails-ex).
+
+Note: This file is processed by `hack/update-external-examples.sh`. New examples
+must follow the exact syntax of the existing entries. Files in this directory
+are automatically pulled down, do not modify/add files to this directory.
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/apicast-gateway-template.yml b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/apicast-gateway-template.yml
new file mode 100644
index 000000000..34f5fcbcc
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/apicast-gateway-template.yml
@@ -0,0 +1,149 @@
+apiVersion: v1
+kind: Template
+metadata:
+ creationTimestamp: null
+ name: 3scale-gateway
+ annotations:
+ description: "3scale API Gateway"
+ iconClass: "icon-load-balancer"
+ tags: "api,gateway,3scale"
+objects:
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: ${THREESCALE_GATEWAY_NAME}
+ spec:
+ replicas: 2
+ selector:
+ deploymentconfig: ${THREESCALE_GATEWAY_NAME}
+ strategy:
+ resources: {}
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: ${THREESCALE_GATEWAY_NAME}
+ spec:
+ containers:
+ - env:
+ - name: THREESCALE_PORTAL_ENDPOINT
+ valueFrom:
+ secretKeyRef:
+ name: ${THREESCALE_PORTAL_ENDPOINT_SECRET}
+ key: password
+ - name: THREESCALE_CONFIG_FILE
+ value: ${THREESCALE_CONFIG_FILE}
+ - name: RESOLVER
+ value: ${RESOLVER}
+ - name: APICAST_SERVICES
+ value: ${APICAST_SERVICES}
+ - name: APICAST_MISSING_CONFIGURATION
+ value: ${MISSING_CONFIGURATION}
+ - name: APICAST_LOG_LEVEL
+ value: ${APICAST_LOG_LEVEL}
+ - name: APICAST_PATH_ROUTING_ENABLED
+ value: ${PATH_ROUTING}
+ - name: APICAST_RESPONSE_CODES
+ value: ${RESPONSE_CODES}
+ - name: APICAST_REQUEST_LOGS
+ value: ${REQUEST_LOGS}
+ - name: APICAST_RELOAD_CONFIG
+ value: ${APICAST_RELOAD_CONFIG}
+ image: ${THREESCALE_GATEWAY_IMAGE}
+ imagePullPolicy: Always
+ name: ${THREESCALE_GATEWAY_NAME}
+ livenessProbe:
+ httpGet:
+ path: /status/live
+ port: 8090
+ initialDelaySeconds: 10
+ timeoutSeconds: 1
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: 8090
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
+ triggers:
+ - type: ConfigChange
+ status: {}
+- apiVersion: v1
+ kind: Service
+ metadata:
+ creationTimestamp: null
+ name: ${THREESCALE_GATEWAY_NAME}
+ spec:
+ ports:
+ - name: 8080-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ selector:
+ deploymentconfig: ${THREESCALE_GATEWAY_NAME}
+ sessionAffinity: None
+ type: ClusterIP
+ status:
+ loadBalancer: {}
+parameters:
+- description: "Name of the secret containing the THREESCALE_PORTAL_ENDPOINT with the access-token or provider key"
+ value: threescale-portal-endpoint-secret
+ name: THREESCALE_PORTAL_ENDPOINT_SECRET
+ required: true
+- description: "Path to saved JSON file with configuration for the gateway. Has to be injected to the docker image as read only volume."
+ value:
+ name: THREESCALE_CONFIG_FILE
+ required: false
+- description: "Name for the 3scale API Gateway"
+ value: threescalegw
+ name: THREESCALE_GATEWAY_NAME
+ required: true
+- description: "Docker image to use."
+ value: 'rhamp10/apicast-gateway:1.0.0-4'
+ name: THREESCALE_GATEWAY_IMAGE
+ required: true
+- description: "DNS Resolver for openresty, if empty it will be autodiscovered"
+ value:
+ name: RESOLVER
+ required: false
+- description: "Subset of services to run. Use comma separated list of service ids (eg. 42,1337)"
+ value:
+ name: APICAST_SERVICES
+ required: false
+- description: "What to do on missing or invalid configuration. Allowed values are: log, exit."
+ value: exit
+ required: false
+ name: MISSING_CONFIGURATION
+- description: "Log level. One of the following: debug, info, notice, warn, error, crit, alert, or emerg."
+ name: APICAST_LOG_LEVEL
+ required: false
+- description: "Enable path routing. Experimental feature."
+ name: PATH_ROUTING
+ required: false
+ value: "false"
+- description: "Enable traffic logging to 3scale. Includes whole request and response."
+ value: "false"
+ name: REQUEST_LOGS
+ required: false
+- description: "Enable logging response codes to 3scale."
+ value: "false"
+ name: RESPONSE_CODES
+ required: false
+- description: "Reload config on every request"
+ value: "false"
+ name: APICAST_RELOAD_CONFIG
+ required: false
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql-persistent.json
new file mode 100644
index 000000000..eb3d296be
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql-persistent.json
@@ -0,0 +1,582 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "cakephp-mysql-persistent",
+ "annotations": {
+ "openshift.io/display-name": "CakePHP + MySQL (Persistent)",
+ "description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.",
+ "tags": "quickstart,php,cakephp",
+ "iconClass": "icon-php",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a CakePHP application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/cakephp-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
+ "labels": {
+ "template": "cakephp-mysql-persistent"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${DATABASE_USER}",
+ "database-password" : "${DATABASE_PASSWORD}",
+ "cakephp-secret-token" : "${CAKEPHP_SECRET_TOKEN}",
+ "cakephp-security-salt" : "${CAKEPHP_SECURITY_SALT}",
+ "cakephp-security-cipher-seed" : "${CAKEPHP_SECURITY_CIPHER_SEED}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "php:7.0"
+ },
+ "env": [
+ {
+ "name": "COMPOSER_MIRROR",
+ "value": "${COMPOSER_MIRROR}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {
+ "script": "./lib/Cake/Console/cake test app AllTests"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate",
+ "recreateParams": {
+ "pre": {
+ "failurePolicy": "Retry",
+ "execNewPod": {
+ "command": [
+ "./migrate-database.sh"
+ ],
+ "containerName": "cakephp-mysql-persistent"
+ }
+ }
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "cakephp-mysql-persistent"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "cakephp-mysql-persistent",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/health.php",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "value": "${DATABASE_ENGINE}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "DATABASE_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "CAKEPHP_SECRET_TOKEN",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "cakephp-secret-token"
+ }
+ }
+ },
+ {
+ "name": "CAKEPHP_SECURITY_SALT",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "cakephp-security-salt"
+ }
+ }
+ },
+ {
+ "name": "CAKEPHP_SECURITY_CIPHER_SEED",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "cakephp-security-cipher-seed"
+ }
+ }
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+ "value": "${OPCACHE_REVALIDATE_FREQ}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mysql",
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "mysql:5.7"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "mysql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 3306
+ }
+ ],
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mysql/data"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "MYSQL_PWD='${DATABASE_PASSWORD}' mysql -h 127.0.0.1 -u ${DATABASE_USER} -D ${DATABASE_NAME} -e 'SELECT 1'" ]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 3306
+ }
+ },
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_MYSQL_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "cakephp-mysql-persistent"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the CakePHP container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_MYSQL_LIMIT",
+ "displayName": "Memory Limit (MySQL)",
+ "description": "Maximum amount of memory the MySQL container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/openshift/cakephp-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the CakePHP service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "required": true,
+ "value": "mysql"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "displayName": "Database Engine",
+ "description": "Database engine: postgresql, mysql or sqlite (default).",
+ "required": true,
+ "value": "mysql"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "displayName": "Database Name",
+ "required": true,
+ "value": "default"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "Database User",
+ "required": true,
+ "value": "cakephp"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "Database Password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "CAKEPHP_SECRET_TOKEN",
+ "displayName": "CakePHP secret token",
+ "description": "Set this to a long random string.",
+ "generate": "expression",
+ "from": "[\\w]{50}"
+ },
+ {
+ "name": "CAKEPHP_SECURITY_SALT",
+ "displayName": "CakePHP Security Salt",
+ "description": "Security salt for session hash.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CAKEPHP_SECURITY_CIPHER_SEED",
+ "displayName": "CakePHP Security Cipher Seed",
+ "description": "Security cipher seed for session hash.",
+ "generate": "expression",
+ "from": "[0-9]{30}"
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+ "displayName": "OPcache Revalidation Frequency",
+ "description": "How often to check script timestamps for updates, in seconds. 0 will result in OPcache checking for updates on every request.",
+ "value": "2"
+ },
+ {
+ "name": "COMPOSER_MIRROR",
+ "displayName": "Custom Composer Mirror URL",
+ "description": "The custom Composer mirror URL",
+ "value": ""
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql.json
new file mode 100644
index 000000000..da2454d2e
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql.json
@@ -0,0 +1,556 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "cakephp-mysql-example",
+ "annotations": {
+ "openshift.io/display-name": "CakePHP + MySQL (Ephemeral)",
+ "description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "tags": "quickstart,php,cakephp",
+ "iconClass": "icon-php",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a CakePHP application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/cakephp-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
+ "labels": {
+ "template": "cakephp-mysql-example"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${DATABASE_USER}",
+ "database-password" : "${DATABASE_PASSWORD}",
+ "cakephp-secret-token" : "${CAKEPHP_SECRET_TOKEN}",
+ "cakephp-security-salt" : "${CAKEPHP_SECURITY_SALT}",
+ "cakephp-security-cipher-seed" : "${CAKEPHP_SECURITY_CIPHER_SEED}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "php:7.0"
+ },
+ "env": [
+ {
+ "name": "COMPOSER_MIRROR",
+ "value": "${COMPOSER_MIRROR}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {
+ "script": "./lib/Cake/Console/cake test app AllTests"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate",
+ "recreateParams": {
+ "pre": {
+ "failurePolicy": "Retry",
+ "execNewPod": {
+ "command": [
+ "./migrate-database.sh"
+ ],
+ "containerName": "cakephp-mysql-example"
+ }
+ }
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "cakephp-mysql-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "cakephp-mysql-example",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/health.php",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "value": "${DATABASE_ENGINE}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "DATABASE_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "CAKEPHP_SECRET_TOKEN",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "cakephp-secret-token"
+ }
+ }
+ },
+ {
+ "name": "CAKEPHP_SECURITY_SALT",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "cakephp-security-salt"
+ }
+ }
+ },
+ {
+ "name": "CAKEPHP_SECURITY_CIPHER_SEED",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "cakephp-security-cipher-seed"
+ }
+ }
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+ "value": "${OPCACHE_REVALIDATE_FREQ}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mysql",
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "mysql:5.7"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "data",
+ "emptyDir": {}
+ }
+ ],
+ "containers": [
+ {
+ "name": "mysql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 3306
+ }
+ ],
+ "volumeMounts": [
+ {
+ "name": "data",
+ "mountPath": "/var/lib/mysql/data"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "MYSQL_PWD='${DATABASE_PASSWORD}' mysql -h 127.0.0.1 -u ${DATABASE_USER} -D ${DATABASE_NAME} -e 'SELECT 1'" ]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 3306
+ }
+ },
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_MYSQL_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "cakephp-mysql-example"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the CakePHP container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_MYSQL_LIMIT",
+ "displayName": "Memory Limit (MySQL)",
+ "description": "Maximum amount of memory the MySQL container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/openshift/cakephp-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the CakePHP service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "required": true,
+ "value": "mysql"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "displayName": "Database Engine",
+ "description": "Database engine: postgresql, mysql or sqlite (default).",
+ "required": true,
+ "value": "mysql"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "displayName": "Database Name",
+ "required": true,
+ "value": "default"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "Database User",
+ "required": true,
+ "value": "cakephp"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "Database Password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "CAKEPHP_SECRET_TOKEN",
+ "displayName": "CakePHP secret token",
+ "description": "Set this to a long random string.",
+ "generate": "expression",
+ "from": "[\\w]{50}"
+ },
+ {
+ "name": "CAKEPHP_SECURITY_SALT",
+ "displayName": "CakePHP Security Salt",
+ "description": "Security salt for session hash.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CAKEPHP_SECURITY_CIPHER_SEED",
+ "displayName": "CakePHP Security Cipher Seed",
+ "description": "Security cipher seed for session hash.",
+ "generate": "expression",
+ "from": "[0-9]{30}"
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+ "displayName": "OPcache Revalidation Frequency",
+ "description": "How often to check script timestamps for updates, in seconds. 0 will result in OPcache checking for updates on every request.",
+ "value": "2"
+ },
+ {
+ "name": "COMPOSER_MIRROR",
+ "displayName": "Custom Composer Mirror URL",
+ "description": "The custom Composer mirror URL",
+ "value": ""
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql-persistent.json
new file mode 100644
index 000000000..81ae63416
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql-persistent.json
@@ -0,0 +1,523 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dancer-mysql-persistent",
+ "annotations": {
+ "openshift.io/display-name": "Dancer + MySQL (Persistent)",
+ "description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
+ "tags": "quickstart,perl,dancer",
+ "iconClass": "icon-perl",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a Dancer based application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/dancer-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
+ "labels": {
+ "template": "dancer-mysql-persistent"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${DATABASE_USER}",
+ "database-password" : "${DATABASE_PASSWORD}",
+ "keybase" : "${SECRET_KEY_BASE}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "perl:5.24"
+ },
+ "env": [
+ {
+ "name": "CPAN_MIRROR",
+ "value": "${CPAN_MIRROR}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {
+ "script": "perl -I extlib/lib/perl5 -I lib t/*"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "dancer-mysql-persistent"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "dancer-mysql-persistent",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/health",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "keybase"
+ }
+ }
+ },
+ {
+ "name": "PERL_APACHE2_RELOAD",
+ "value": "${PERL_APACHE2_RELOAD}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mysql",
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "mysql:5.7"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "mysql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 3306
+ }
+ ],
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mysql/data"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "MYSQL_PWD='${DATABASE_PASSWORD}' mysql -h 127.0.0.1 -u ${DATABASE_USER} -D ${DATABASE_NAME} -e 'SELECT 1'" ]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 3306
+ }
+ },
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_MYSQL_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "dancer-mysql-persistent"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the Perl Dancer container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_MYSQL_LIMIT",
+ "displayName": "Memory Limit (MySQL)",
+ "description": "Maximum amount of memory the MySQL container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/openshift/dancer-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the Dancer service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "required": true,
+ "value": "database"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "Database Username",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "Database Password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{8}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "displayName": "Database Name",
+ "required": true,
+ "value": "sampledb"
+ },
+ {
+ "name": "PERL_APACHE2_RELOAD",
+ "displayName": "Perl Module Reload",
+ "description": "Set this to \"true\" to enable automatic reloading of modified Perl modules.",
+ "value": ""
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "displayName": "Secret Key",
+ "description": "Your secret key for verifying the integrity of signed cookies.",
+ "generate": "expression",
+ "from": "[a-z0-9]{127}"
+ },
+ {
+ "name": "CPAN_MIRROR",
+ "displayName": "Custom CPAN Mirror URL",
+ "description": "The custom CPAN mirror URL",
+ "value": ""
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql.json
new file mode 100644
index 000000000..7a285dba8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql.json
@@ -0,0 +1,497 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dancer-mysql-example",
+ "annotations": {
+ "openshift.io/display-name": "Dancer + MySQL (Ephemeral)",
+ "description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "tags": "quickstart,perl,dancer",
+ "iconClass": "icon-perl",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a Dancer based application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/dancer-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
+ "labels": {
+ "template": "dancer-mysql-example"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${DATABASE_USER}",
+ "database-password" : "${DATABASE_PASSWORD}",
+ "keybase" : "${SECRET_KEY_BASE}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "perl:5.24"
+ },
+ "env": [
+ {
+ "name": "CPAN_MIRROR",
+ "value": "${CPAN_MIRROR}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {
+ "script": "perl -I extlib/lib/perl5 -I lib t/*"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "dancer-mysql-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "dancer-mysql-example",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/health",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "keybase"
+ }
+ }
+ },
+ {
+ "name": "PERL_APACHE2_RELOAD",
+ "value": "${PERL_APACHE2_RELOAD}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mysql",
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "mysql:5.7"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "data",
+ "emptyDir": {}
+ }
+ ],
+ "containers": [
+ {
+ "name": "mysql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 3306
+ }
+ ],
+ "volumeMounts": [
+ {
+ "name": "data",
+ "mountPath": "/var/lib/mysql/data"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "MYSQL_PWD='${DATABASE_PASSWORD}' mysql -h 127.0.0.1 -u ${DATABASE_USER} -D ${DATABASE_NAME} -e 'SELECT 1'" ]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 3306
+ }
+ },
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_MYSQL_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "dancer-mysql-example"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the Perl Dancer container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_MYSQL_LIMIT",
+ "displayName": "Memory Limit (MySQL)",
+ "description": "Maximum amount of memory the MySQL container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/openshift/dancer-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the Dancer service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "required": true,
+ "value": "database"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "Database Username",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "Database Password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{8}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "displayName": "Database Name",
+ "required": true,
+ "value": "sampledb"
+ },
+ {
+ "name": "PERL_APACHE2_RELOAD",
+ "displayName": "Perl Module Reload",
+ "description": "Set this to \"true\" to enable automatic reloading of modified Perl modules.",
+ "value": ""
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "displayName": "Secret Key",
+ "description": "Your secret key for verifying the integrity of signed cookies.",
+ "generate": "expression",
+ "from": "[a-z0-9]{127}"
+ },
+ {
+ "name": "CPAN_MIRROR",
+ "displayName": "Custom CPAN Mirror URL",
+ "description": "The custom CPAN mirror URL",
+ "value": ""
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql-persistent.json
new file mode 100644
index 000000000..9f982c286
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql-persistent.json
@@ -0,0 +1,536 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "django-psql-persistent",
+ "annotations": {
+ "openshift.io/display-name": "Django + PostgreSQL (Persistent)",
+ "description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
+ "tags": "quickstart,python,django",
+ "iconClass": "icon-python",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a Django based application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/django-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
+ "labels": {
+ "template": "django-psql-persistent"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${DATABASE_USER}",
+ "database-password" : "${DATABASE_PASSWORD}",
+ "django-secret-key" : "${DJANGO_SECRET_KEY}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "python:3.5"
+ },
+ "env": [
+ {
+ "name": "PIP_INDEX_URL",
+ "value": "${PIP_INDEX_URL}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {
+ "script": "./manage.py test"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "django-psql-persistent"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "django-psql-persistent",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/health",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/health",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "value": "${DATABASE_ENGINE}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "DATABASE_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "APP_CONFIG",
+ "value": "${APP_CONFIG}"
+ },
+ {
+ "name": "DJANGO_SECRET_KEY",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "django-secret-key"
+ }
+ }
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "postgresql",
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "postgresql:9.5"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "postgresql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 5432
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/pgsql/data"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 5432
+ }
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_POSTGRESQL_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "django-psql-persistent"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "required": true,
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "required": true,
+ "description": "Maximum amount of memory the Django container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_POSTGRESQL_LIMIT",
+ "displayName": "Memory Limit (PostgreSQL)",
+ "required": true,
+ "description": "Maximum amount of memory the PostgreSQL container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "description": "The URL of the repository with your application source code.",
+ "value": "https://github.com/openshift/django-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the Django service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "required": true,
+ "value": "postgresql"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "displayName": "Database Engine",
+ "required": true,
+ "description": "Database engine: postgresql, mysql or sqlite (default).",
+ "value": "postgresql"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "displayName": "Database Name",
+ "required": true,
+ "value": "default"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "Database Username",
+ "required": true,
+ "value": "django"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "Database User Password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "APP_CONFIG",
+ "displayName": "Application Configuration File Path",
+ "description": "Relative path to Gunicorn configuration file (optional)."
+ },
+ {
+ "name": "DJANGO_SECRET_KEY",
+ "displayName": "Django Secret Key",
+ "description": "Set this to a long random string.",
+ "generate": "expression",
+ "from": "[\\w]{50}"
+ },
+ {
+ "name": "PIP_INDEX_URL",
+ "displayName": "Custom PyPi Index URL",
+ "description": "The custom PyPi index URL",
+ "value": ""
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql.json
new file mode 100644
index 000000000..7bee85ddd
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql.json
@@ -0,0 +1,510 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "django-psql-example",
+ "annotations": {
+ "openshift.io/display-name": "Django + PostgreSQL (Ephemeral)",
+ "description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "tags": "quickstart,python,django",
+ "iconClass": "icon-python",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a Django based application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/django-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
+ "labels": {
+ "template": "django-psql-example"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${DATABASE_USER}",
+ "database-password" : "${DATABASE_PASSWORD}",
+ "django-secret-key" : "${DJANGO_SECRET_KEY}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "python:3.5"
+ },
+ "env": [
+ {
+ "name": "PIP_INDEX_URL",
+ "value": "${PIP_INDEX_URL}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {
+ "script": "./manage.py test"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "django-psql-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "django-psql-example",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/health",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/health",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "value": "${DATABASE_ENGINE}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "DATABASE_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "APP_CONFIG",
+ "value": "${APP_CONFIG}"
+ },
+ {
+ "name": "DJANGO_SECRET_KEY",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "django-secret-key"
+ }
+ }
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "postgresql",
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "postgresql:9.5"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "data",
+ "emptyDir": {}
+ }
+ ],
+ "containers": [
+ {
+ "name": "postgresql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 5432
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "name": "data",
+ "mountPath": "/var/lib/pgsql/data"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 5432
+ }
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_POSTGRESQL_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "django-psql-example"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "required": true,
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "required": true,
+ "description": "Maximum amount of memory the Django container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_POSTGRESQL_LIMIT",
+ "displayName": "Memory Limit (PostgreSQL)",
+ "required": true,
+ "description": "Maximum amount of memory the PostgreSQL container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "description": "The URL of the repository with your application source code.",
+ "value": "https://github.com/openshift/django-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the Django service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "required": true,
+ "value": "postgresql"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "displayName": "Database Engine",
+ "required": true,
+ "description": "Database engine: postgresql, mysql or sqlite (default).",
+ "value": "postgresql"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "displayName": "Database Name",
+ "required": true,
+ "value": "default"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "Database Username",
+ "required": true,
+ "value": "django"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "Database User Password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "APP_CONFIG",
+ "displayName": "Application Configuration File Path",
+ "description": "Relative path to Gunicorn configuration file (optional)."
+ },
+ {
+ "name": "DJANGO_SECRET_KEY",
+ "displayName": "Django Secret Key",
+ "description": "Set this to a long random string.",
+ "generate": "expression",
+ "from": "[\\w]{50}"
+ },
+ {
+ "name": "PIP_INDEX_URL",
+ "displayName": "Custom PyPi Index URL",
+ "description": "The custom PyPi index URL",
+ "value": ""
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-example.json b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-example.json
new file mode 100644
index 000000000..a09d71a00
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-example.json
@@ -0,0 +1,333 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dotnet-example",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core",
+ "description": "An example .NET Core application.",
+ "tags": "quickstart,dotnet,.net",
+ "iconClass": "icon-dotnet",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/redhat-developer/s2i-dotnetcore",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "objects": [
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "${DOTNET_IMAGE_STREAM_TAG}"
+ },
+ "env": [
+ {
+ "name": "DOTNET_STARTUP_PROJECT",
+ "value": "${DOTNET_STARTUP_PROJECT}"
+ },
+ {
+ "name": "DOTNET_ASSEMBLY_NAME",
+ "value": "${DOTNET_ASSEMBLY_NAME}"
+ },
+ {
+ "name": "DOTNET_NPM_TOOLS",
+ "value": "${DOTNET_NPM_TOOLS}"
+ },
+ {
+ "name": "DOTNET_TEST_PROJECTS",
+ "value": "${DOTNET_TEST_PROJECTS}"
+ },
+ {
+ "name": "DOTNET_CONFIGURATION",
+ "value": "${DOTNET_CONFIGURATION}"
+ },
+ {
+ "name": "DOTNET_PUBLISH",
+ "value": "true"
+ },
+ {
+ "name": "DOTNET_RESTORE_SOURCES",
+ "value": "${DOTNET_RESTORE_SOURCES}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "dotnet-app"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "dotnet-app",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "livenessProbe": {
+ "httpGet": {
+ "path": "/",
+ "port": 8080,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 40,
+ "timeoutSeconds": 15
+ },
+ "readinessProbe": {
+ "httpGet": {
+ "path": "/",
+ "port": 8080,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 10,
+ "timeoutSeconds": 30
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "env": []
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "dotnet-example"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "DOTNET_IMAGE_STREAM_TAG",
+ "displayName": ".NET builder",
+ "required": true,
+ "description": "The image stream tag which is used to build the code.",
+ "value": "dotnet:1.0"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "value": "dotnetcore-1.0"
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to use a subdirectory of the source code repository"
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the .NET Core service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DOTNET_STARTUP_PROJECT",
+ "displayName": "Startup Project",
+ "description": "Set this to the folder containing your startup project.",
+ "value": "app"
+ },
+ {
+ "name": "DOTNET_ASSEMBLY_NAME",
+ "displayName": "Startup Assembly",
+ "description": "Set this when the assembly name is overridden in the project file."
+ },
+ {
+ "name": "DOTNET_NPM_TOOLS",
+ "displayName": "Npm Tools",
+ "description": "Set this to a space separated list of npm tools needed to publish.",
+ "value": "bower gulp"
+ },
+ {
+ "name": "DOTNET_TEST_PROJECTS",
+ "displayName": "Test projects",
+ "description": "Set this to a space separated list of test projects to run before publishing."
+ },
+ {
+ "name": "DOTNET_CONFIGURATION",
+ "displayName": "Configuration",
+ "description": "Set this to configuration (Release/Debug).",
+ "value": "Release"
+ },
+ {
+ "name": "DOTNET_RESTORE_SOURCES",
+ "displayName": "NuGet package sources",
+ "description": "Set this to override the NuGet.config sources."
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-pgsql-persistent.json b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-pgsql-persistent.json
new file mode 100644
index 000000000..fa31f7f61
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-pgsql-persistent.json
@@ -0,0 +1,544 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dotnet-pgsql-persistent",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core + PostgreSQL (Persistent)",
+ "description": "An example .NET Core application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore.",
+ "tags": "quickstart,dotnet",
+ "iconClass": "icon-dotnet",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/redhat-developer/s2i-dotnetcore",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore.",
+ "labels": {
+ "template": "dotnet-pgsql-persistent"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "${DOTNET_IMAGE_STREAM_TAG}"
+ },
+ "env": [
+ {
+ "name": "DOTNET_STARTUP_PROJECT",
+ "value": "${DOTNET_STARTUP_PROJECT}"
+ },
+ {
+ "name": "DOTNET_ASSEMBLY_NAME",
+ "value": "${DOTNET_ASSEMBLY_NAME}"
+ },
+ {
+ "name": "DOTNET_NPM_TOOLS",
+ "value": "${DOTNET_NPM_TOOLS}"
+ },
+ {
+ "name": "DOTNET_TEST_PROJECTS",
+ "value": "${DOTNET_TEST_PROJECTS}"
+ },
+ {
+ "name": "DOTNET_CONFIGURATION",
+ "value": "${DOTNET_CONFIGURATION}"
+ },
+ {
+ "name": "DOTNET_PUBLISH",
+ "value": "true"
+ },
+ {
+ "name": "DOTNET_RESTORE_SOURCES",
+ "value": "${DOTNET_RESTORE_SOURCES}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {}
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "updatePeriodSeconds": 1,
+ "intervalSeconds": 1,
+ "timeoutSeconds": 600,
+ "maxUnavailable": "25%",
+ "maxSurge": "25%"
+ },
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "dotnet-pgsql-persistent"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "dotnet-pgsql-persistent",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "env": [
+ {
+ "name": "ConnectionString",
+ "value": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "livenessProbe": {
+ "httpGet": {
+ "path": "/",
+ "port": 8080,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 40,
+ "timeoutSeconds": 10
+ },
+ "readinessProbe": {
+ "httpGet": {
+ "path": "/",
+ "port": 8080,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 10,
+ "timeoutSeconds": 30
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "postgresql",
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "openshift",
+ "name": "postgresql:9.5"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "postgresql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 5432
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [
+ "/bin/sh",
+ "-i",
+ "-c",
+ "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"
+ ]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 5432
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/pgsql/data"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DATABASE_USER}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DATABASE_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_POSTGRESQL_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "musicstore"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "required": true,
+ "description": "Maximum amount of memory the .NET Core container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_POSTGRESQL_LIMIT",
+ "displayName": "Memory Limit (PostgreSQL)",
+ "required": true,
+ "description": "Maximum amount of memory the PostgreSQL container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "DOTNET_IMAGE_STREAM_TAG",
+ "displayName": ".NET builder",
+ "required": true,
+ "description": "The image stream tag which is used to build the code.",
+ "value": "dotnet:1.1"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "required": true,
+ "description": "The OpenShift Namespace where the .NET builder ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "description": "The URL of the repository with your application source code.",
+ "value": "https://github.com/redhat-developer/s2i-aspnet-musicstore-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "value": "rel/1.1-example"
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "DOTNET_STARTUP_PROJECT",
+ "displayName": "Startup Project",
+ "description": "Set this to the folder containing your startup project.",
+ "value": "samples/MusicStore"
+ },
+ {
+ "name": "DOTNET_ASSEMBLY_NAME",
+ "displayName": "Startup Assembly",
+ "description": "Set this when the assembly name is overridden in the project file."
+ },
+ {
+ "name": "DOTNET_NPM_TOOLS",
+ "displayName": "Npm Tools",
+ "description": "Set this to a space separated list of npm tools needed to publish."
+ },
+ {
+ "name": "DOTNET_TEST_PROJECTS",
+ "displayName": "Test projects",
+ "description": "Set this to a space separated list of test projects to run before publishing."
+ },
+ {
+ "name": "DOTNET_CONFIGURATION",
+ "displayName": "Configuration",
+ "description": "Set this to configuration (Release/Debug).",
+ "value": "Release"
+ },
+ {
+ "name": "DOTNET_RESTORE_SOURCES",
+ "displayName": "NuGet package sources",
+ "description": "Set this to override the NuGet.config sources."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the .NET Core service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "required": true,
+ "displayName": "Database Service Name",
+ "value": "postgresql"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "Database Username",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "Database Password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{8}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "required": true,
+ "displayName": "Database Name",
+ "value": "musicstore"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "displayName": "Maximum Database Connections",
+ "value": "100"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "displayName": "Shared Buffer Amount",
+ "value": "12MB"
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-ephemeral-template.json
new file mode 100644
index 000000000..264e4b2de
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-ephemeral-template.json
@@ -0,0 +1,289 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jenkins-ephemeral",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "Jenkins (Ephemeral)",
+ "description": "Jenkins service, without persistent storage.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "iconClass": "icon-jenkins",
+ "tags": "instant-app,jenkins",
+ "template.openshift.io/long-description": "This template deploys a Jenkins server capable of managing OpenShift Pipeline builds and supporting OpenShift-based oauth login. The Jenkins configuration is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/other_images/jenkins.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "objects": [
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "tls": {
+ "termination": "edge",
+ "insecureEdgeTerminationPolicy": "Redirect"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "jenkins"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${JENKINS_IMAGE_STREAM_TAG}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${JENKINS_SERVICE_NAME}",
+ "containers": [
+ {
+ "name": "jenkins",
+ "image": " ",
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/login",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 420,
+ "failureThreshold" : 30,
+ "httpGet": {
+ "path": "/login",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "OPENSHIFT_ENABLE_OAUTH",
+ "value": "${ENABLE_OAUTH}"
+ },
+ {
+ "name": "OPENSHIFT_ENABLE_REDIRECT_PROMPT",
+ "value": "true"
+ },
+ {
+ "name": "OPENSHIFT_JENKINS_JVM_ARCH",
+ "value": "${JVM_ARCH}"
+ },
+ {
+ "name": "KUBERNETES_MASTER",
+ "value": "https://kubernetes.default:443"
+ },
+ {
+ "name": "KUBERNETES_TRUST_CERTIFICATES",
+ "value": "true"
+ },
+ {
+ "name": "JNLP_SERVICE_NAME",
+ "value": "${JNLP_SERVICE_NAME}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${JENKINS_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/jenkins"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${JENKINS_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ }
+ },
+ {
+ "kind": "ServiceAccount",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "annotations": {
+ "serviceaccounts.openshift.io/oauth-redirectreference.jenkins": "{\"kind\":\"OAuthRedirectReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Route\",\"name\":\"${JENKINS_SERVICE_NAME}\"}}"
+ }
+ }
+ },
+ {
+ "kind": "RoleBinding",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}_edit"
+ },
+ "groupNames": null,
+ "subjects": [
+ {
+ "kind": "ServiceAccount",
+ "name": "${JENKINS_SERVICE_NAME}"
+ }
+ ],
+ "roleRef": {
+ "name": "edit"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JNLP_SERVICE_NAME}"
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "agent",
+ "protocol": "TCP",
+ "port": 50000,
+ "targetPort": 50000,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "annotations": {
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${JNLP_SERVICE_NAME}\", \"namespace\": \"\", \"kind\": \"Service\"}]",
+ "service.openshift.io/infrastructure": "true"
+ },
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "protocol": "TCP",
+ "port": 80,
+ "targetPort": 8080,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "JENKINS_SERVICE_NAME",
+ "displayName": "Jenkins Service Name",
+ "description": "The name of the OpenShift Service exposed for the Jenkins container.",
+ "value": "jenkins"
+ },
+ {
+ "name": "JNLP_SERVICE_NAME",
+ "displayName": "Jenkins JNLP Service Name",
+ "description": "The name of the service used for master/slave communication.",
+ "value": "jenkins-jnlp"
+ },
+ {
+ "name": "ENABLE_OAUTH",
+ "displayName": "Enable OAuth in Jenkins",
+ "description": "Whether to enable OAuth OpenShift integration. If false, the static account 'admin' will be initialized with the password 'password'.",
+ "value": "true"
+ },
+ {
+ "name": "JVM_ARCH",
+ "displayName": "Jenkins JVM Architecture",
+ "description": "Whether Jenkins runs with a 32 bit (i386) or 64 bit (x86_64) JVM.",
+ "value": "i386"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Jenkins ImageStream Namespace",
+ "description": "The OpenShift Namespace where the Jenkins ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "JENKINS_IMAGE_STREAM_TAG",
+ "displayName": "Jenkins ImageStreamTag",
+ "description": "Name of the ImageStreamTag to be used for the Jenkins image.",
+ "value": "jenkins:latest"
+ }
+ ],
+ "labels": {
+ "template": "jenkins-ephemeral-template"
+ }
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-persistent-template.json
new file mode 100644
index 000000000..b47bdf353
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-persistent-template.json
@@ -0,0 +1,313 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jenkins-persistent",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "Jenkins (Persistent)",
+ "description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
+ "iconClass": "icon-jenkins",
+ "tags": "instant-app,jenkins",
+ "template.openshift.io/long-description": "This template deploys a Jenkins server capable of managing OpenShift Pipeline builds and supporting OpenShift-based oauth login.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/other_images/jenkins.html",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "objects": [
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "tls": {
+ "termination": "edge",
+ "insecureEdgeTerminationPolicy": "Redirect"
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "jenkins"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${JENKINS_IMAGE_STREAM_TAG}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${JENKINS_SERVICE_NAME}",
+ "containers": [
+ {
+ "name": "jenkins",
+ "image": " ",
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/login",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 420,
+ "failureThreshold" : 30,
+ "httpGet": {
+ "path": "/login",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "OPENSHIFT_ENABLE_OAUTH",
+ "value": "${ENABLE_OAUTH}"
+ },
+ {
+ "name": "OPENSHIFT_ENABLE_REDIRECT_PROMPT",
+ "value": "true"
+ },
+ {
+ "name": "OPENSHIFT_JENKINS_JVM_ARCH",
+ "value": "${JVM_ARCH}"
+ },
+ {
+ "name": "KUBERNETES_MASTER",
+ "value": "https://kubernetes.default:443"
+ },
+ {
+ "name": "KUBERNETES_TRUST_CERTIFICATES",
+ "value": "true"
+ },
+ {
+ "name": "JNLP_SERVICE_NAME",
+ "value": "${JNLP_SERVICE_NAME}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${JENKINS_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/jenkins"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${JENKINS_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${JENKINS_SERVICE_NAME}"
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ }
+ },
+ {
+ "kind": "ServiceAccount",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "annotations": {
+ "serviceaccounts.openshift.io/oauth-redirectreference.jenkins": "{\"kind\":\"OAuthRedirectReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Route\",\"name\":\"${JENKINS_SERVICE_NAME}\"}}"
+ }
+ }
+ },
+ {
+ "kind": "RoleBinding",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}_edit"
+ },
+ "groupNames": null,
+ "subjects": [
+ {
+ "kind": "ServiceAccount",
+ "name": "${JENKINS_SERVICE_NAME}"
+ }
+ ],
+ "roleRef": {
+ "name": "edit"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JNLP_SERVICE_NAME}"
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "agent",
+ "protocol": "TCP",
+ "port": 50000,
+ "targetPort": 50000,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "annotations": {
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${JNLP_SERVICE_NAME}\", \"namespace\": \"\", \"kind\": \"Service\"}]",
+ "service.openshift.io/infrastructure": "true"
+ },
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "protocol": "TCP",
+ "port": 80,
+ "targetPort": 8080,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "JENKINS_SERVICE_NAME",
+ "displayName": "Jenkins Service Name",
+ "description": "The name of the OpenShift Service exposed for the Jenkins container.",
+ "value": "jenkins"
+ },
+ {
+ "name": "JNLP_SERVICE_NAME",
+ "displayName": "Jenkins JNLP Service Name",
+ "description": "The name of the service used for master/slave communication.",
+ "value": "jenkins-jnlp"
+ },
+ {
+ "name": "ENABLE_OAUTH",
+ "displayName": "Enable OAuth in Jenkins",
+ "description": "Whether to enable OAuth OpenShift integration. If false, the static account 'admin' will be initialized with the password 'password'.",
+ "value": "true"
+ },
+ {
+ "name": "JVM_ARCH",
+ "displayName": "Jenkins JVM Architecture",
+ "description": "Whether Jenkins runs with a 32 bit (i386) or 64 bit (x86_64) JVM.",
+ "value": "i386"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi.",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Jenkins ImageStream Namespace",
+ "description": "The OpenShift Namespace where the Jenkins ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "JENKINS_IMAGE_STREAM_TAG",
+ "displayName": "Jenkins ImageStreamTag",
+ "description": "Name of the ImageStreamTag to be used for the Jenkins image.",
+ "value": "jenkins:latest"
+ }
+ ],
+ "labels": {
+ "template": "jenkins-persistent-template"
+ }
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb-persistent.json
new file mode 100644
index 000000000..6ee999cb1
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb-persistent.json
@@ -0,0 +1,545 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs-mongo-persistent",
+ "annotations": {
+ "openshift.io/display-name": "Node.js + MongoDB (Persistent)",
+ "description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
+ "tags": "quickstart,nodejs",
+ "iconClass": "icon-nodejs",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a NodeJS application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/nodejs-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
+ "labels": {
+ "template": "nodejs-mongo-persistent"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData": {
+ "database-user": "${DATABASE_USER}",
+ "database-password": "${DATABASE_PASSWORD}",
+ "database-admin-password" : "${DATABASE_ADMIN_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "nodejs:4"
+ },
+ "env": [
+ {
+ "name": "NPM_MIRROR",
+ "value": "${NPM_MIRROR}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {
+ "script": "npm test"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "nodejs-mongo-persistent"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "nodejs-mongo-persistent",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "MONGODB_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-admin-password"
+ }
+ }
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/pagecount",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/pagecount",
+ "port": 8080
+ }
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mongodb",
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "mongodb:3.2"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mongodb",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 27017
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-admin-password"
+ }
+ }
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 3,
+ "exec": {
+ "command": [
+ "/bin/sh",
+ "-i",
+ "-c",
+ "mongo 127.0.0.1:27017/$MONGODB_DATABASE -u $MONGODB_USER -p $MONGODB_PASSWORD --eval=\"quit()\""
+ ]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 27017
+ }
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_MONGODB_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mongodb/data"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "nodejs-mongo-persistent"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the Node.js container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_MONGODB_LIMIT",
+ "displayName": "Memory Limit (MongoDB)",
+ "description": "Maximum amount of memory the MongoDB container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/openshift/nodejs-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the Node.js service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "required": true,
+ "value": "mongodb"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "MongoDB Username",
+ "description": "Username for MongoDB user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "MongoDB Password",
+ "description": "Password for the MongoDB user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "displayName": "Database Name",
+ "required": true,
+ "value": "sampledb"
+ },
+ {
+ "name": "DATABASE_ADMIN_PASSWORD",
+ "displayName": "Database Administrator Password",
+ "description": "Password for the database admin user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "NPM_MIRROR",
+ "displayName": "Custom NPM Mirror URL",
+ "description": "The custom NPM mirror URL",
+ "value": ""
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb.json
new file mode 100644
index 000000000..5c177a7e0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb.json
@@ -0,0 +1,521 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs-mongodb-example",
+ "annotations": {
+ "openshift.io/display-name": "Node.js + MongoDB (Ephemeral)",
+ "description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "tags": "quickstart,nodejs",
+ "iconClass": "icon-nodejs",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a NodeJS application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/nodejs-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
+ "labels": {
+ "template": "nodejs-mongodb-example"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData": {
+ "database-user": "${DATABASE_USER}",
+ "database-password": "${DATABASE_PASSWORD}",
+ "database-admin-password" : "${DATABASE_ADMIN_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "nodejs:4"
+ },
+ "env": [
+ {
+ "name": "NPM_MIRROR",
+ "value": "${NPM_MIRROR}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {
+ "script": "npm test"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "nodejs-mongodb-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "nodejs-mongodb-example",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "MONGODB_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-admin-password"
+ }
+ }
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/pagecount",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/pagecount",
+ "port": 8080
+ }
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mongodb",
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "mongodb:3.2"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mongodb",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 27017
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-admin-password"
+ }
+ }
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 3,
+ "exec": {
+ "command": [
+ "/bin/sh",
+ "-i",
+ "-c",
+ "mongo 127.0.0.1:27017/$MONGODB_DATABASE -u $MONGODB_USER -p $MONGODB_PASSWORD --eval=\"quit()\""
+ ]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 27017
+ }
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_MONGODB_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mongodb/data"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "nodejs-mongodb-example"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the Node.js container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_MONGODB_LIMIT",
+ "displayName": "Memory Limit (MongoDB)",
+ "description": "Maximum amount of memory the MongoDB container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/openshift/nodejs-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the Node.js service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "required": true,
+ "value": "mongodb"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "MongoDB Username",
+ "description": "Username for MongoDB user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "MongoDB Password",
+ "description": "Password for the MongoDB user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "displayName": "Database Name",
+ "required": true,
+ "value": "sampledb"
+ },
+ {
+ "name": "DATABASE_ADMIN_PASSWORD",
+ "displayName": "Database Administrator Password",
+ "description": "Password for the database admin user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "NPM_MIRROR",
+ "displayName": "Custom NPM Mirror URL",
+ "description": "The custom NPM mirror URL",
+ "value": ""
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql-persistent.json
new file mode 100644
index 000000000..b400cfdb3
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql-persistent.json
@@ -0,0 +1,602 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "rails-pgsql-persistent",
+ "annotations": {
+ "openshift.io/display-name": "Rails + PostgreSQL (Persistent)",
+ "description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
+ "tags": "quickstart,ruby,rails",
+ "iconClass": "icon-ruby",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a Rails application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/rails-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
+ "labels": {
+ "template": "rails-pgsql-persistent"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${DATABASE_USER}",
+ "database-password" : "${DATABASE_PASSWORD}",
+ "application-user" : "${APPLICATION_USER}",
+ "application-password" : "${APPLICATION_PASSWORD}",
+ "keybase" : "${SECRET_KEY_BASE}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "ruby:2.3"
+ },
+ "env": [
+ {
+ "name": "RUBYGEM_MIRROR",
+ "value": "${RUBYGEM_MIRROR}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {
+ "script": "bundle exec rake test"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate",
+ "recreateParams": {
+ "pre": {
+ "failurePolicy": "Abort",
+ "execNewPod": {
+ "command": [
+ "./migrate-database.sh"
+ ],
+ "containerName": "${NAME}"
+ }
+ }
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${NAME}",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 5,
+ "httpGet": {
+ "path": "/articles",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 10,
+ "httpGet": {
+ "path": "/articles",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "POSTGRESQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "keybase"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "value": "${APPLICATION_DOMAIN}"
+ },
+ {
+ "name": "APPLICATION_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "application-user"
+ }
+ }
+ },
+ {
+ "name": "APPLICATION_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "application-password"
+ }
+ }
+ },
+ {
+ "name": "RAILS_ENV",
+ "value": "${RAILS_ENV}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "postgresql",
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "postgresql:9.5"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "postgresql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 5432
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 5432
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/pgsql/data"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_POSTGRESQL_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "rails-pgsql-persistent"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "required": true,
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "required": true,
+ "description": "Maximum amount of memory the Rails container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_POSTGRESQL_LIMIT",
+ "displayName": "Memory Limit (PostgreSQL)",
+ "required": true,
+ "description": "Maximum amount of memory the PostgreSQL container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "description": "The URL of the repository with your application source code.",
+ "value": "https://github.com/openshift/rails-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the Rails service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "displayName": "Secret Key",
+ "description": "Your secret key for verifying the integrity of signed cookies.",
+ "generate": "expression",
+ "from": "[a-z0-9]{127}"
+ },
+ {
+ "name": "APPLICATION_USER",
+ "displayName": "Application Username",
+ "required": true,
+ "description": "The application user that is used within the sample application to authorize access on pages.",
+ "value": "openshift"
+ },
+ {
+ "name": "APPLICATION_PASSWORD",
+ "displayName": "Application Password",
+ "required": true,
+ "description": "The application password that is used within the sample application to authorize access on pages.",
+ "value": "secret"
+ },
+ {
+ "name": "RAILS_ENV",
+ "displayName": "Rails Environment",
+ "required": true,
+ "description": "Environment under which the sample application will run. Could be set to production, development or test.",
+ "value": "production"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "required": true,
+ "displayName": "Database Service Name",
+ "value": "postgresql"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "Database Username",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "Database Password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{8}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "required": true,
+ "displayName": "Database Name",
+ "value": "root"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "displayName": "Maximum Database Connections",
+ "value": "100"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "displayName": "Shared Buffer Amount",
+ "value": "12MB"
+ },
+ {
+ "name": "RUBYGEM_MIRROR",
+ "displayName": "Custom RubyGems Mirror URL",
+ "description": "The custom RubyGems mirror URL",
+ "value": ""
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql.json
new file mode 100644
index 000000000..fa67412ff
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql.json
@@ -0,0 +1,576 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "rails-postgresql-example",
+ "annotations": {
+ "openshift.io/display-name": "Rails + PostgreSQL (Ephemeral)",
+ "description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "tags": "quickstart,ruby,rails",
+ "iconClass": "icon-ruby",
+ "template.openshift.io/long-description": "This template defines resources needed to develop a Rails application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "template.openshift.io/provider-display-name": "Red Hat, Inc.",
+ "template.openshift.io/documentation-url": "https://github.com/openshift/rails-ex",
+ "template.openshift.io/support-url": "https://access.redhat.com"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
+ "labels": {
+ "template": "rails-postgresql-example"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${DATABASE_USER}",
+ "database-password" : "${DATABASE_PASSWORD}",
+ "application-user" : "${APPLICATION_USER}",
+ "application-password" : "${APPLICATION_PASSWORD}",
+ "keybase" : "${SECRET_KEY_BASE}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "ruby:2.3"
+ },
+ "env": [
+ {
+ "name": "RUBYGEM_MIRROR",
+ "value": "${RUBYGEM_MIRROR}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {
+ "script": "bundle exec rake test"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate",
+ "recreateParams": {
+ "pre": {
+ "failurePolicy": "Abort",
+ "execNewPod": {
+ "command": [
+ "./migrate-database.sh"
+ ],
+ "containerName": "${NAME}"
+ }
+ }
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${NAME}",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 5,
+ "httpGet": {
+ "path": "/articles",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 10,
+ "httpGet": {
+ "path": "/articles",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "POSTGRESQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "keybase"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "value": "${APPLICATION_DOMAIN}"
+ },
+ {
+ "name": "APPLICATION_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "application-user"
+ }
+ }
+ },
+ {
+ "name": "APPLICATION_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "application-password"
+ }
+ }
+ },
+ {
+ "name": "RAILS_ENV",
+ "value": "${RAILS_ENV}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "postgresql",
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "postgresql:9.5"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "data",
+ "emptyDir": {}
+ }
+ ],
+ "containers": [
+ {
+ "name": "postgresql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 5432
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 5432
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "data",
+ "mountPath": "/var/lib/pgsql/data"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_POSTGRESQL_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "rails-postgresql-example"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "required": true,
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "required": true,
+ "description": "Maximum amount of memory the Rails container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_POSTGRESQL_LIMIT",
+ "displayName": "Memory Limit (PostgreSQL)",
+ "required": true,
+ "description": "Maximum amount of memory the PostgreSQL container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "description": "The URL of the repository with your application source code.",
+ "value": "https://github.com/openshift/rails-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the Rails service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "displayName": "Secret Key",
+ "description": "Your secret key for verifying the integrity of signed cookies.",
+ "generate": "expression",
+ "from": "[a-z0-9]{127}"
+ },
+ {
+ "name": "APPLICATION_USER",
+ "displayName": "Application Username",
+ "required": true,
+ "description": "The application user that is used within the sample application to authorize access on pages.",
+ "value": "openshift"
+ },
+ {
+ "name": "APPLICATION_PASSWORD",
+ "displayName": "Application Password",
+ "required": true,
+ "description": "The application password that is used within the sample application to authorize access on pages.",
+ "value": "secret"
+ },
+ {
+ "name": "RAILS_ENV",
+ "displayName": "Rails Environment",
+ "required": true,
+ "description": "Environment under which the sample application will run. Could be set to production, development or test.",
+ "value": "production"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "required": true,
+ "displayName": "Database Service Name",
+ "value": "postgresql"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "Database Username",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "Database Password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{8}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "required": true,
+ "displayName": "Database Name",
+ "value": "root"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "displayName": "Maximum Database Connections",
+ "value": "100"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "displayName": "Shared Buffer Amount",
+ "value": "12MB"
+ },
+ {
+ "name": "RUBYGEM_MIRROR",
+ "displayName": "Custom RubyGems Mirror URL",
+ "description": "The custom RubyGems mirror URL",
+ "value": ""
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/fis-image-streams.json b/roles/openshift_examples/files/examples/v1.6/xpaas-streams/fis-image-streams.json
new file mode 100644
index 000000000..9d99973be
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-streams/fis-image-streams.json
@@ -0,0 +1,76 @@
+{
+ "kind": "List",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "fis-image-streams",
+ "annotations": {
+ "description": "ImageStream definitions for JBoss Fuse Integration Services."
+ }
+ },
+ "items": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "fis-java-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-fuse-6/fis-java-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Fuse Integration Services 1.0 Java S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,jboss-fuse,java,xpaas",
+ "supports":"jboss-fuse:6.2.1,java:8,xpaas:1.2",
+ "version": "1.0"
+ }
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "description": "JBoss Fuse Integration Services 2.0 Java S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,jboss-fuse,java,xpaas",
+ "supports":"jboss-fuse:6.3.0,java:8,xpaas:1.2",
+ "version": "2.0"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "fis-karaf-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-fuse-6/fis-karaf-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Fuse Integration Services 1.0 Karaf S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,jboss-fuse,java,karaf,xpaas",
+ "supports":"jboss-fuse:6.2.1,java:8,xpaas:1.2",
+ "version": "1.0"
+ }
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "description": "JBoss Fuse Integration Services 2.0 Karaf S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,jboss-fuse,java,karaf,xpaas",
+ "supports":"jboss-fuse:6.3.0,java:8,xpaas:1.2",
+ "version": "2.0"
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v1.6/xpaas-streams/jboss-image-streams.json
new file mode 100644
index 000000000..049f3f884
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-streams/jboss-image-streams.json
@@ -0,0 +1,397 @@
+{
+ "kind": "List",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-image-streams",
+ "annotations": {
+ "description": "ImageStream definitions for JBoss Middleware products."
+ }
+ },
+ "items": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-webserver30-tomcat7-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift",
+ "tags": [
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 7 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
+ "supports":"tomcat7:3.0,tomcat:7,java:8,xpaas:1.1",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.1"
+ }
+ },
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 7 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
+ "supports":"tomcat7:3.0,tomcat:7,java:8,xpaas:1.2",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.2"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-webserver30-tomcat8-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift",
+ "tags": [
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 8 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
+ "supports":"tomcat8:3.0,tomcat:8,java:8,xpaas:1.1",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.1"
+ }
+ },
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 8 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
+ "supports":"tomcat8:3.0,tomcat:8,java:8,xpaas:1.2",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.2"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-eap64-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-eap-6/eap64-openshift",
+ "tags": [
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "JBoss EAP 6.4 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:6.4,javaee:6,java:8,xpaas:1.1",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "6.4.x",
+ "version": "1.1"
+ }
+ },
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "JBoss EAP 6.4 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:6.4,javaee:6,java:8,xpaas:1.2",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "6.4.x",
+ "version": "1.2"
+ }
+ },
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss EAP 6.4 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:6.4,javaee:6,java:8,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "6.4.x",
+ "version": "1.3"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "JBoss EAP 6.4 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:6.4,javaee:6,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "6.4.x",
+ "version": "1.4"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-eap70-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-eap-7/eap70-openshift",
+ "tags": [
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss EAP 7.0 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:7.0,javaee:7,java:8,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "7.0.0.GA",
+ "version": "1.3"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "JBoss EAP 7.0 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:7.0,javaee:7,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "7.0.0.GA",
+ "version": "1.4"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-decisionserver62-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver62-openshift",
+ "tags": [
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "Red Hat JBoss BRMS 6.2 decision server S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,decisionserver,java,xpaas",
+ "supports":"decisionserver:6.2,java:8,xpaas:1.2",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "decisionserver/hellorules",
+ "sampleRef": "1.2",
+ "version": "1.2"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-decisionserver63-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver63-openshift",
+ "tags": [
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "Red Hat JBoss BRMS 6.3 decision server S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,decisionserver,java,xpaas",
+ "supports":"decisionserver:6.3,java:8,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "decisionserver/hellorules",
+ "sampleRef": "1.3",
+ "version": "1.3"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-processserver63-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-processserver-6/processserver63-openshift",
+ "tags": [
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "Red Hat JBoss BPM Suite 6.3 intelligent process server S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,processserver,java,xpaas",
+ "supports":"processserver:6.3,java:8,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "processserver/library",
+ "sampleRef": "1.3",
+ "version": "1.3"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-datagrid65-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift",
+ "tags": [
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "JBoss Data Grid 6.5 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datagrid,java,jboss,xpaas",
+ "supports":"datagrid:6.5,java:8,xpaas:1.2",
+ "version": "1.2"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-datavirt63-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "Red Hat JBoss Data Virtualization 6.3 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datavirt,java,jboss,xpaas",
+ "supports":"datavirt:6.3,java:8,xpaas:1.4",
+ "version": "1.0"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-amq-62"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-amq-6/amq62-openshift",
+ "tags": [
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "JBoss A-MQ 6.2 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports":"amq:6.2,messaging,xpaas:1.1",
+ "version": "1.1"
+ }
+ },
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "JBoss A-MQ 6.2 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports":"amq:6.2,messaging,xpaas:1.2",
+ "version": "1.2"
+ }
+ },
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss A-MQ 6.2 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports":"amq:6.2,messaging,xpaas:1.3",
+ "version": "1.3"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redhat-sso70-openshift",
+ "annotations": {
+ "description": "Red Hat SSO 7.0"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/redhat-sso-7/sso70-openshift",
+ "tags": [
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "Red Hat SSO 7.0",
+ "iconClass": "icon-jboss",
+ "tags": "sso,keycloak,redhat",
+ "supports":"sso:7.0,xpaas:1.3",
+ "version": "1.3"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redhat-openjdk18-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat OpenJDK 8",
+ "description": "Build and run Java applications using Maven and OpenJDK 8.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,java,xpaas,openjdk",
+ "supports":"java:8,xpaas:1.0",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "sampleContextDir": "undertow-servlet",
+ "version": "1.0"
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-basic.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-basic.json
new file mode 100644
index 000000000..ab35afead
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-basic.json
@@ -0,0 +1,321 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template doesn't feature SSL support.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.3.1"
+ },
+ "name": "amq62-basic"
+ },
+ "labels": {
+ "template": "amq62-basic",
+ "xpaas": "1.3.1"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent-ssl.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent-ssl.json
new file mode 100644
index 000000000..c12f06dec
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent-ssl.json
@@ -0,0 +1,549 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages. This template supports SSL and requires usage of OpenShift secrets.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.3.1"
+ },
+ "name": "amq62-persistent-ssl"
+ },
+ "labels": {
+ "template": "amq62-persistent-ssl",
+ "xpaas": "1.3.1"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "Name of a secret containing SSL related files",
+ "name": "AMQ_SECRET",
+ "value": "amq-app-secret",
+ "required": true
+ },
+ {
+ "description": "SSL trust store filename",
+ "name": "AMQ_TRUSTSTORE",
+ "value": "broker.ts",
+ "required": true
+ },
+ {
+ "description": "SSL trust store password",
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "SSL key store filename",
+ "name": "AMQ_KEYSTORE",
+ "value": "broker.ks",
+ "required": true
+ },
+ {
+ "description": "Password for accessing SSL keystore",
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5671,
+ "targetPort": 5671
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8883,
+ "targetPort": 8883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61612,
+ "targetPort": 61612
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61617,
+ "targetPort": 61617
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire (SSL) port."
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "amq-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "broker-secret-volume",
+ "mountPath": "/etc/amq-secret-volume",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt-ssl",
+ "containerPort": 8883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_KEYSTORE_TRUSTSTORE_DIR",
+ "value": "/etc/amq-secret-volume"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE",
+ "value": "${AMQ_TRUSTSTORE}"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "${AMQ_TRUSTSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_KEYSTORE",
+ "value": "${AMQ_KEYSTORE}"
+ },
+ {
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "${AMQ_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "broker-secret-volume",
+ "secret": {
+ "secretName": "${AMQ_SECRET}"
+ }
+ },
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent.json
new file mode 100644
index 000000000..897ce0395
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent.json
@@ -0,0 +1,371 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages. This template doesn't feature SSL support.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.3.1"
+ },
+ "name": "amq62-persistent"
+ },
+ "labels": {
+ "template": "amq62-persistent",
+ "xpaas": "1.3.1"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-ssl.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-ssl.json
new file mode 100644
index 000000000..97d110286
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-ssl.json
@@ -0,0 +1,503 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template supports SSL and requires usage of OpenShift secrets.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.3.1"
+ },
+ "name": "amq62-ssl"
+ },
+ "labels": {
+ "template": "amq62-ssl",
+ "xpaas": "1.3.1"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. SSL variants of these protocols will be configured automaticaly.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Name of a secret containing SSL related files",
+ "name": "AMQ_SECRET",
+ "value": "amq-app-secret",
+ "required": true
+ },
+ {
+ "description": "SSL trust store filename",
+ "name": "AMQ_TRUSTSTORE",
+ "value": "broker.ts",
+ "required": true
+ },
+ {
+ "description": "SSL trust store password",
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "SSL key store filename",
+ "name": "AMQ_KEYSTORE",
+ "value": "broker.ks",
+ "required": true
+ },
+ {
+ "description": "Password for accessing SSL keystore",
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5671,
+ "targetPort": 5671
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8883,
+ "targetPort": 8883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61612,
+ "targetPort": 61612
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61617,
+ "targetPort": 61617
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire (SSL) port."
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "amq-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "broker-secret-volume",
+ "mountPath": "/etc/amq-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt-ssl",
+ "containerPort": 8883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_KEYSTORE_TRUSTSTORE_DIR",
+ "value": "/etc/amq-secret-volume"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE",
+ "value": "${AMQ_TRUSTSTORE}"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "${AMQ_TRUSTSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_KEYSTORE",
+ "value": "${AMQ_KEYSTORE}"
+ },
+ {
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "${AMQ_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "broker-secret-volume",
+ "secret": {
+ "secretName": "${AMQ_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-basic.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-basic.json
new file mode 100644
index 000000000..56e76016f
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-basic.json
@@ -0,0 +1,332 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JDG 6.5 applications.",
+ "tags": "datagrid,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "datagrid65-basic"
+ },
+ "labels": {
+ "template": "datagrid65-basic",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "datagrid-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for JDG user.",
+ "name": "USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Password for JDG user.",
+ "name": "PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "hotrod,memcached,rest",
+ "required": false
+ },
+ {
+ "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
+ "name": "CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "",
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
+ "name": "MEMCACHED_CACHE",
+ "value": "default",
+ "required": false
+ },
+ {
+ "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11211,
+ "targetPort": 11211
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-memcached",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Memcached service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11333,
+ "targetPort": 11333
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-hotrod",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Hot Rod service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-openshift:1.2"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "jboss-datagrid65-openshift",
+ "imagePullPolicy": "Always",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ },
+ {
+ "name": "memcached",
+ "containerPort": 11211,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11222,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "USERNAME",
+ "value": "${USERNAME}"
+ },
+ {
+ "name": "PASSWORD",
+ "value": "${PASSWORD}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "${INFINISPAN_CONNECTORS}"
+ },
+ {
+ "name": "CACHE_NAMES",
+ "value": "${CACHE_NAMES}"
+ },
+ {
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
+ },
+ {
+ "name": "HOTROD_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-hotrod"
+ },
+ {
+ "name": "MEMCACHED_CACHE",
+ "value": "${MEMCACHED_CACHE}"
+ },
+ {
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "${REST_SECURITY_DOMAIN}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-https.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-https.json
new file mode 100644
index 000000000..639ac2e11
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-https.json
@@ -0,0 +1,501 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JDG 6.5 applications.",
+ "tags": "datagrid,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "datagrid65-https"
+ },
+ "labels": {
+ "template": "datagrid65-https",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "datagrid-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for JDG user.",
+ "name": "USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Password for JDG user.",
+ "name": "PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "datagrid-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "hotrod,memcached,rest",
+ "required": false
+ },
+ {
+ "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
+ "name": "CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "",
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
+ "name": "MEMCACHED_CACHE",
+ "value": "default",
+ "required": false
+ },
+ {
+ "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datagrid-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11211,
+ "targetPort": 11211
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-memcached",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Memcached service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11333,
+ "targetPort": 11333
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-hotrod",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Hot Rod service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-openshift:1.2"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "datagrid-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "jboss-datagrid65-openshift",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "datagrid-keystore-volume",
+ "mountPath": "/etc/datagrid-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ },
+ {
+ "name": "memcached",
+ "containerPort": 11211,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11222,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "USERNAME",
+ "value": "${USERNAME}"
+ },
+ {
+ "name": "PASSWORD",
+ "value": "${PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datagrid-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "${INFINISPAN_CONNECTORS}"
+ },
+ {
+ "name": "CACHE_NAMES",
+ "value": "${CACHE_NAMES}"
+ },
+ {
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
+ },
+ {
+ "name": "HOTROD_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-hotrod"
+ },
+ {
+ "name": "MEMCACHED_CACHE",
+ "value": "${MEMCACHED_CACHE}"
+ },
+ {
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "${REST_SECURITY_DOMAIN}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "datagrid-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql-persistent.json
new file mode 100644
index 000000000..22ca3f0a0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql-persistent.json
@@ -0,0 +1,779 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JDG 6.5 and MySQL applications with persistent storage.",
+ "tags": "datagrid,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "datagrid65-mysql-persistent"
+ },
+ "labels": {
+ "template": "datagrid65-mysql-persistent",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "datagrid-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for JDG user.",
+ "name": "USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Password for JDG user.",
+ "name": "PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "datagrid-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:/jboss/datasources/mysql",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "hotrod,memcached,rest",
+ "required": false
+ },
+ {
+ "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
+ "name": "CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "",
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
+ "name": "MEMCACHED_CACHE",
+ "value": "default",
+ "required": false
+ },
+ {
+ "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datagrid-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11211,
+ "targetPort": 11211
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-memcached",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Memcached service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11333,
+ "targetPort": 11333
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-hotrod",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Hot Rod service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-openshift:1.2"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "datagrid-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "jboss-datagrid65-openshift",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "datagrid-keystore-volume",
+ "mountPath": "/etc/datagrid-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ },
+ {
+ "name": "memcached",
+ "containerPort": 11211,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11222,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "USERNAME",
+ "value": "${USERNAME}"
+ },
+ {
+ "name": "PASSWORD",
+ "value": "${PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datagrid-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "${INFINISPAN_CONNECTORS}"
+ },
+ {
+ "name": "CACHE_NAMES",
+ "value": "${CACHE_NAMES}"
+ },
+ {
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
+ },
+ {
+ "name": "HOTROD_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-hotrod"
+ },
+ {
+ "name": "MEMCACHED_CACHE",
+ "value": "${MEMCACHED_CACHE}"
+ },
+ {
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "${REST_SECURITY_DOMAIN}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "datagrid-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql.json
new file mode 100644
index 000000000..e1a585d24
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql.json
@@ -0,0 +1,739 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JDG 6.5 and MySQL applications.",
+ "tags": "datagrid,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "datagrid65-mysql"
+ },
+ "labels": {
+ "template": "datagrid65-mysql",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "datagrid-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for JDG user.",
+ "name": "USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Password for JDG user.",
+ "name": "PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "datagrid-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:/jboss/datasources/mysql",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "hotrod,memcached,rest",
+ "required": false
+ },
+ {
+ "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
+ "name": "CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "",
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
+ "name": "MEMCACHED_CACHE",
+ "value": "default",
+ "required": false
+ },
+ {
+ "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datagrid-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11211,
+ "targetPort": 11211
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-memcached",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Memcached service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11333,
+ "targetPort": 11333
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-hotrod",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Hot Rod service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-openshift:1.2"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "datagrid-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "jboss-datagrid65-openshift",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "datagrid-keystore-volume",
+ "mountPath": "/etc/datagrid-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ },
+ {
+ "name": "memcached",
+ "containerPort": 11211,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11222,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "USERNAME",
+ "value": "${USERNAME}"
+ },
+ {
+ "name": "PASSWORD",
+ "value": "${PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datagrid-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "${INFINISPAN_CONNECTORS}"
+ },
+ {
+ "name": "CACHE_NAMES",
+ "value": "${CACHE_NAMES}"
+ },
+ {
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
+ },
+ {
+ "name": "HOTROD_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-hotrod"
+ },
+ {
+ "name": "MEMCACHED_CACHE",
+ "value": "${MEMCACHED_CACHE}"
+ },
+ {
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "${REST_SECURITY_DOMAIN}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "datagrid-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql-persistent.json
new file mode 100644
index 000000000..12720eb19
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql-persistent.json
@@ -0,0 +1,756 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JDG 6.5 and PostgreSQL applications with persistent storage.",
+ "tags": "datagrid,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "datagrid65-postgresql-persistent"
+ },
+ "labels": {
+ "template": "datagrid65-postgresql-persistent",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "datagrid-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for JDG user.",
+ "name": "USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Password for JDG user.",
+ "name": "PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "datagrid-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/postgresql",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "hotrod,memcached,rest",
+ "required": false
+ },
+ {
+ "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
+ "name": "CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "",
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
+ "name": "MEMCACHED_CACHE",
+ "value": "default",
+ "required": false
+ },
+ {
+ "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datagrid-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11211,
+ "targetPort": 11211
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-memcached",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Memcached service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11333,
+ "targetPort": 11333
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-hotrod",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Hot Rod service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-openshift:1.2"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "datagrid-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "jboss-datagrid65-openshift",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "datagrid-keystore-volume",
+ "mountPath": "/etc/datagrid-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ },
+ {
+ "name": "memcached",
+ "containerPort": 11211,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11222,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "USERNAME",
+ "value": "${USERNAME}"
+ },
+ {
+ "name": "PASSWORD",
+ "value": "${PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datagrid-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "${INFINISPAN_CONNECTORS}"
+ },
+ {
+ "name": "CACHE_NAMES",
+ "value": "${CACHE_NAMES}"
+ },
+ {
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
+ },
+ {
+ "name": "HOTROD_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-hotrod"
+ },
+ {
+ "name": "MEMCACHED_CACHE",
+ "value": "${MEMCACHED_CACHE}"
+ },
+ {
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "${REST_SECURITY_DOMAIN}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "datagrid-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql.json
new file mode 100644
index 000000000..da8015fb0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql.json
@@ -0,0 +1,716 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JDG 6.5 and PostgreSQL applications built using.",
+ "tags": "datagrid,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "datagrid65-postgresql"
+ },
+ "labels": {
+ "template": "datagrid65-postgresql",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "datagrid-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for JDG user.",
+ "name": "USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Password for JDG user.",
+ "name": "PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "datagrid-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/postgresql",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "hotrod,memcached,rest",
+ "required": false
+ },
+ {
+ "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configurd for each entry.",
+ "name": "CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "",
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
+ "name": "MEMCACHED_CACHE",
+ "value": "default",
+ "required": false
+ },
+ {
+ "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datagrid-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11211,
+ "targetPort": 11211
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-memcached",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Memcached service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11333,
+ "targetPort": 11333
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-hotrod",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Hot Rod service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-openshift:1.2"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "datagrid-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "jboss-datagrid65-openshift",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "datagrid-keystore-volume",
+ "mountPath": "/etc/datagrid-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ },
+ {
+ "name": "memcached",
+ "containerPort": 11211,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11222,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "USERNAME",
+ "value": "${USERNAME}"
+ },
+ {
+ "name": "PASSWORD",
+ "value": "${PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datagrid-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "${INFINISPAN_CONNECTORS}"
+ },
+ {
+ "name": "CACHE_NAMES",
+ "value": "${CACHE_NAMES}"
+ },
+ {
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
+ },
+ {
+ "name": "HOTROD_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-hotrod"
+ },
+ {
+ "name": "MEMCACHED_CACHE",
+ "value": "${MEMCACHED_CACHE}"
+ },
+ {
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "${REST_SECURITY_DOMAIN}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "datagrid-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-basic-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-basic-s2i.json
new file mode 100644
index 000000000..7d64dac98
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-basic-s2i.json
@@ -0,0 +1,415 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JBoss Data Virtualization 6.3 services built using S2I.",
+ "tags": "jdv,datavirt,jboss,xpaas",
+ "version": "1.4.0"
+ },
+ "name": "datavirt63-basic-s2i"
+ },
+ "labels": {
+ "template": "datavirt63-basic-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new data service has been created in your project. The username/password for accessing the service is ${TEIID_USERNAME}/${TEIID_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the secret named ${CONFIGURATION_NAME} containing the datasource configuration details required by the deployed VDB(s).",
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "displayName": "Application Name",
+ "name": "APPLICATION_NAME",
+ "value": "datavirt-app",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing configuration properties for the data sources.",
+ "displayName": "Configuration Secret Name",
+ "name": "CONFIGURATION_NAME",
+ "value": "datavirt-app-config",
+ "required": true
+ },
+ {
+ "description": "Specify a custom hostname for the http route. Leave blank to use default hostname, e.g.: <service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom http Route Hostname",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The URL of the repository with your application source code.",
+ "displayName": "Git Repository URL",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "displayName": "Git Reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to your project if it is not in the root of your repository.",
+ "displayName": "Context Directory",
+ "name": "CONTEXT_DIR",
+ "value": "datavirt/dynamicvdb-datafederation/app",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret specified by CONFIGURATION_NAME.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "datavirt-service-account",
+ "required": true
+ },
+ {
+ "description": "Username associated with Teiid data service.",
+ "displayName": "Teiid Username",
+ "name": "TEIID_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for Teiid user.",
+ "displayName": "Teiid User Password",
+ "name": "TEIID_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Username associated with ModeShape.",
+ "displayName": "ModeShape Username",
+ "name": "MODESHAPE_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for ModeShape user.",
+ "displayName": "ModeShape User Password",
+ "name": "MODESHAPE_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the GitHub webhook.",
+ "displayName": "Github Webhook Secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the Generic webhook.",
+ "displayName": "Generic Webhook Secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "displayName": "ImageStream Namespace",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
+ "displayName": "JGroups Cluster Password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "displayName": "Deploy Exploded Archives",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "name": "http",
+ "port": 8080,
+ "targetPort": "http"
+ },
+ {
+ "name": "jdbc",
+ "port": 31000,
+ "targetPort": "jdbc"
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The data virtualization services."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "port": {
+ "targetPort": "http"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datavirt63-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "configuration",
+ "mountPath": "/etc/datavirt-environment",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbc",
+ "containerPort": 31000,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ },
+ {
+ "name": "MODESHAPE_USERNAME",
+ "value": "${MODESHAPE_USERNAME}"
+ },
+ {
+ "name": "MODESHAPE_PASSWORD",
+ "value": "${MODESHAPE_PASSWORD}"
+ },
+ {
+ "name": "ENV_FILES",
+ "value": "/etc/datavirt-environment/*"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "configuration",
+ "secret": {
+ "secretName": "${CONFIGURATION_NAME}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-extensions-support-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-extensions-support-s2i.json
new file mode 100644
index 000000000..1e7c03b99
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-extensions-support-s2i.json
@@ -0,0 +1,763 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JBoss Data Virtualization 6.3 services built using S2I. Includes support for installing extensions (e.g. third-party DB drivers) and the ability to configure certificates for serving secure content.",
+ "tags": "jdv,datavirt,jboss,xpaas",
+ "version": "1.4.0"
+ },
+ "name": "datavirt63-extensions-support-s2i"
+ },
+ "labels": {
+ "template": "datavirt63-extensions-support-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new data service has been created in your project. The username/password for accessing the service is ${TEIID_USERNAME}/${TEIID_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${CONFIGURATION_NAME}\" containing the datasource configuration details required by the deployed VDB(s); \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "displayName": "Application Name",
+ "name": "APPLICATION_NAME",
+ "value": "datavirt-app",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing configuration properties for the data sources.",
+ "displayName": "Configuration Secret Name",
+ "name": "CONFIGURATION_NAME",
+ "value": "datavirt-app-config",
+ "required": true
+ },
+ {
+ "description": "Specify a custom hostname for the http route. Leave blank to use default hostname, e.g.: <service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom http Route Hostname",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Specify a custom hostname for the https route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom https Route Hostname",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Specify a custom hostname for the JDBC route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom JDBC Route Hostname",
+ "name": "HOSTNAME_JDBC",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The URL of the repository with your application source code.",
+ "displayName": "Git Repository URL",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "displayName": "Git Reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to your project if it is not in the root of your repository.",
+ "displayName": "Context Directory",
+ "name": "CONTEXT_DIR",
+ "value": "datavirt/dynamicvdb-datafederation/app",
+ "required": false
+ },
+ {
+ "description": "The URL of the repository with source code for the extensions image. The image should have all modules, etc., placed in the \"/extensions/\" directory in the image. If the contents are in a different directory, the sourcePath for the ImageSource in the BuildConfig must be modified.",
+ "displayName": "Extensions Git Repository URL",
+ "name": "EXTENSIONS_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Set this to a branch name, tag or other ref of your extensions repository if you are not using the default branch.",
+ "displayName": "Extensions Git Reference",
+ "name": "EXTENSIONS_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to your project if it is not in the root of your extensions repository.",
+ "displayName": "Extensions Context Directory",
+ "name": "EXTENSIONS_DIR",
+ "value": "datavirt/derby-driver-image",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to the Dockerfile in your extensions directory.",
+ "displayName": "Extensions Dockerfile",
+ "name": "EXTENSIONS_DOCKERFILE",
+ "value": "Dockerfile",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by CONFIGURATION_NAME, HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "datavirt-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore to be used for serving secure content.",
+ "displayName": "Server Keystore Secret Name",
+ "name": "HTTPS_SECRET",
+ "value": "datavirt-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret.",
+ "displayName": "Server Keystore Filename",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS).",
+ "displayName": "Server Keystore Type",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate.",
+ "displayName": "Server Certificate Name",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "displayName": "Server Keystore Password",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Username associated with Teiid data service.",
+ "displayName": "Teiid Username",
+ "name": "TEIID_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for Teiid user.",
+ "displayName": "Teiid User Password",
+ "name": "TEIID_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Username associated with ModeShape.",
+ "displayName": "ModeShape Username",
+ "name": "MODESHAPE_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for ModeShape user.",
+ "displayName": "ModeShape User Password",
+ "name": "MODESHAPE_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the GitHub webhook.",
+ "displayName": "Github Webhook Secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the Generic webhook.",
+ "displayName": "Generic Webhook Secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "displayName": "ImageStream Namespace",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore to be used for securing JGroups communications.",
+ "displayName": "JGroups Secret Name",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datavirt-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the JGroups secret.",
+ "displayName": "JGroups Keystore Filename",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the JGroups server certificate",
+ "displayName": "JGroups Certificate Name",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "secret-key",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "displayName": "JGroups Keystore Password",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "password",
+ "required": false
+ },
+ {
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
+ "displayName": "JGroups Cluster Password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "displayName": "Deploy Exploded Archives",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "name": "http",
+ "port": 8080,
+ "targetPort": "http"
+ },
+ {
+ "name": "https",
+ "port": 8443,
+ "targetPort": "https"
+ },
+ {
+ "name": "jdbc",
+ "port": 31000,
+ "targetPort": "jdbc"
+ },
+ {
+ "name": "jdbcs",
+ "port": 31443,
+ "targetPort": "jdbcs"
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The data virtualization services."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "port": {
+ "targetPort": "http"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "port": {
+ "targetPort": "https"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-jdbc",
+ "metadata": {
+ "name": "jdbc-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's JDBC service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_JDBC}",
+ "port": {
+ "targetPort": "jdbcs"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-ext",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-ext",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${EXTENSIONS_REPOSITORY_URL}",
+ "ref": "${EXTENSIONS_REPOSITORY_REF}"
+ },
+ "contextDir": "${EXTENSIONS_DIR}"
+ },
+ "strategy": {
+ "type": "Docker",
+ "dockerStrategy": {
+ "dockerfilePath": "${EXTENSIONS_DOCKERFILE}"
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}-ext:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}",
+ "images": [
+ {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}-ext:latest"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/extras",
+ "sourcePath": "/extensions/."
+ }
+ ]
+ }
+ ]
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datavirt63-openshift:1.0"
+ },
+ "env": [
+ {
+ "name": "CUSTOM_INSTALL_DIRECTORIES",
+ "value": "extensions/*"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}-ext:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "configuration",
+ "mountPath": "/etc/datavirt-environment",
+ "readOnly": true
+ },
+ {
+ "name": "datavirt-keystore-volume",
+ "mountPath": "/etc/datavirt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datavirt-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbc",
+ "containerPort": 31000,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbcs",
+ "containerPort": 31443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datavirt-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ },
+ {
+ "name": "MODESHAPE_USERNAME",
+ "value": "${MODESHAPE_USERNAME}"
+ },
+ {
+ "name": "MODESHAPE_PASSWORD",
+ "value": "${MODESHAPE_PASSWORD}"
+ },
+ {
+ "name": "ENV_FILES",
+ "value": "/etc/datavirt-environment/*"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE",
+ "value": "/etc/datavirt-secret-volume/${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEY_ALIAS",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "QS_DB_TYPE",
+ "value": "derby",
+ "description": "Used soley by the quickstart and set here to ensure the template can be instatiated with its default parameter values, i.e. so itworks ootb."
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "configuration",
+ "secret": {
+ "secretName": "${CONFIGURATION_NAME}"
+ }
+ },
+ {
+ "name": "datavirt-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datavirt-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-secure-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-secure-s2i.json
new file mode 100644
index 000000000..07f926ff3
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-secure-s2i.json
@@ -0,0 +1,642 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JBoss Data Virtualization 6.3 services built using S2I. Includes ability to configure certificates for serving secure content.",
+ "tags": "jdv,datavirt,jboss,xpaas",
+ "version": "1.4.0"
+ },
+ "name": "datavirt63-secure-s2i"
+ },
+ "labels": {
+ "template": "datavirt63-secure-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new data service has been created in your project. The username/password for accessing the service is ${TEIID_USERNAME}/${TEIID_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${CONFIGURATION_NAME}\" containing the datasource configuration details required by the deployed VDB(s); \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "displayName": "Application Name",
+ "name": "APPLICATION_NAME",
+ "value": "datavirt-app",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing configuration properties for the data sources.",
+ "displayName": "Configuration Secret Name",
+ "name": "CONFIGURATION_NAME",
+ "value": "datavirt-app-config",
+ "required": true
+ },
+ {
+ "description": "Specify a custom hostname for the http route. Leave blank to use default hostname, e.g.: <service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom http Route Hostname",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Specify a custom hostname for the https route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom https Route Hostname",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Specify a custom hostname for the JDBC route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom JDBC Route Hostname",
+ "name": "HOSTNAME_JDBC",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The URL of the repository with your application source code.",
+ "displayName": "Git Repository URL",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "displayName": "Git Reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to your project if it is not in the root of your repository.",
+ "displayName": "Context Directory",
+ "name": "CONTEXT_DIR",
+ "value": "datavirt/dynamicvdb-datafederation/app",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by CONFIGURATION_NAME, HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "datavirt-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore to be used for serving secure content.",
+ "displayName": "Server Keystore Secret Name",
+ "name": "HTTPS_SECRET",
+ "value": "datavirt-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret.",
+ "displayName": "Server Keystore Filename",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS).",
+ "displayName": "Server Keystore Type",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate.",
+ "displayName": "Server Certificate Name",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "displayName": "Server Keystore Password",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Username associated with Teiid data service.",
+ "displayName": "Teiid Username",
+ "name": "TEIID_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for Teiid user.",
+ "displayName": "Teiid User Password",
+ "name": "TEIID_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Username associated with ModeShape.",
+ "displayName": "ModeShape Username",
+ "name": "MODESHAPE_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for ModeShape user.",
+ "displayName": "ModeShape User Password",
+ "name": "MODESHAPE_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the GitHub webhook.",
+ "displayName": "Github Webhook Secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the Generic webhook.",
+ "displayName": "Generic Webhook Secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "displayName": "ImageStream Namespace",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore to be used for securing JGroups communications.",
+ "displayName": "JGroups Secret Name",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datavirt-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the JGroups secret.",
+ "displayName": "JGroups Keystore Filename",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the JGroups server certificate",
+ "displayName": "JGroups Certificate Name",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "secret-key",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "displayName": "JGroups Keystore Password",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "password",
+ "required": false
+ },
+ {
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
+ "displayName": "JGroups Cluster Password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "displayName": "Deploy Exploded Archives",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "name": "http",
+ "port": 8080,
+ "targetPort": "http"
+ },
+ {
+ "name": "https",
+ "port": 8443,
+ "targetPort": "https"
+ },
+ {
+ "name": "jdbc",
+ "port": 31000,
+ "targetPort": "jdbc"
+ },
+ {
+ "name": "jdbcs",
+ "port": 31443,
+ "targetPort": "jdbcs"
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The data virtualization services."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "port": {
+ "targetPort": "http"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "port": {
+ "targetPort": "https"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-jdbc",
+ "metadata": {
+ "name": "jdbc-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's JDBC service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_JDBC}",
+ "port": {
+ "targetPort": "jdbcs"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datavirt63-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "configuration",
+ "mountPath": "/etc/datavirt-environment",
+ "readOnly": true
+ },
+ {
+ "name": "datavirt-keystore-volume",
+ "mountPath": "/etc/datavirt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datavirt-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbc",
+ "containerPort": 31000,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbcs",
+ "containerPort": 31443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datavirt-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ },
+ {
+ "name": "MODESHAPE_USERNAME",
+ "value": "${MODESHAPE_USERNAME}"
+ },
+ {
+ "name": "MODESHAPE_PASSWORD",
+ "value": "${MODESHAPE_PASSWORD}"
+ },
+ {
+ "name": "ENV_FILES",
+ "value": "/etc/datavirt-environment/*"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE",
+ "value": "/etc/datavirt-secret-volume/${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEY_ALIAS",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "configuration",
+ "secret": {
+ "secretName": "${CONFIGURATION_NAME}"
+ }
+ },
+ {
+ "name": "datavirt-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datavirt-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-amq-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-amq-s2i.json
new file mode 100644
index 000000000..754a3b4c0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-amq-s2i.json
@@ -0,0 +1,686 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.2 decision server A-MQ applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,amq,java,messaging,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "decisionserver62-amq-s2i"
+ },
+ "labels": {
+ "template": "decisionserver62-amq-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "HelloRulesContainer=org.openshift.quickstarts:decisionserver-hellorules:1.2.0.Final",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "decisionserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver62-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "decisionserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "mountPath": "/etc/decisionserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/decisionserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-basic-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-basic-s2i.json
new file mode 100644
index 000000000..8be4ac90b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-basic-s2i.json
@@ -0,0 +1,339 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.2 decision server applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,java,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "decisionserver62-basic-s2i"
+ },
+ "labels": {
+ "template": "decisionserver62-basic-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "HelloRulesContainer=org.openshift.quickstarts:decisionserver-hellorules:1.2.0.Final",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver62-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-https-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-https-s2i.json
new file mode 100644
index 000000000..bf9047599
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-https-s2i.json
@@ -0,0 +1,473 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.2 decision server HTTPS applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,java,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "decisionserver62-https-s2i"
+ },
+ "labels": {
+ "template": "decisionserver62-https-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "HelloRulesContainer=org.openshift.quickstarts:decisionserver-hellorules:1.2.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "decisionserver-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver62-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "decisionserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "mountPath": "/etc/decisionserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/decisionserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-amq-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-amq-s2i.json
new file mode 100644
index 000000000..51e667e02
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-amq-s2i.json
@@ -0,0 +1,696 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.3 decision server A-MQ applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,amq,java,messaging,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "decisionserver63-amq-s2i"
+ },
+ "labels": {
+ "template": "decisionserver63-amq-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "decisionserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "decisionserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "mountPath": "/etc/decisionserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/decisionserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-basic-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-basic-s2i.json
new file mode 100644
index 000000000..c5f0d006a
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-basic-s2i.json
@@ -0,0 +1,339 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.3 decision server applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,java,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "decisionserver63-basic-s2i"
+ },
+ "labels": {
+ "template": "decisionserver63-basic-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-https-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-https-s2i.json
new file mode 100644
index 000000000..3db0e4c84
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-https-s2i.json
@@ -0,0 +1,473 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.3 decision server HTTPS applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,java,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "decisionserver63-https-s2i"
+ },
+ "labels": {
+ "template": "decisionserver63-https-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "decisionserver-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "decisionserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "mountPath": "/etc/decisionserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/decisionserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-persistent-s2i.json
new file mode 100644
index 000000000..72dbb4302
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-persistent-s2i.json
@@ -0,0 +1,813 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 A-MQ applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-amq-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap64-amq-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "6.4.x",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "helloworld-mdb",
+ "required": false
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
+ "name": "MQ_JNDI",
+ "value": "java:/ConnectionFactory",
+ "required": false
+ },
+ {
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "HELLOWORLDMDBQueue",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "HELLOWORLDMDBTopic",
+ "required": false
+ },
+ {
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data/kahadb",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-s2i.json
new file mode 100644
index 000000000..9dd847451
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-s2i.json
@@ -0,0 +1,760 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 A-MQ applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-amq-s2i"
+ },
+ "labels": {
+ "template": "eap64-amq-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "6.4.x",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "helloworld-mdb",
+ "required": false
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
+ "name": "MQ_JNDI",
+ "value": "java:/ConnectionFactory",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "HELLOWORLDMDBQueue",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "HELLOWORLDMDBTopic",
+ "required": false
+ },
+ {
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-basic-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-basic-s2i.json
new file mode 100644
index 000000000..7b1800b7b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-basic-s2i.json
@@ -0,0 +1,340 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 6 applications built using S2I.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-basic-s2i"
+ },
+ "labels": {
+ "template": "eap64-basic-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "6.4.x",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "kitchensink",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-https-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-https-s2i.json
new file mode 100644
index 000000000..31716d84c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-https-s2i.json
@@ -0,0 +1,525 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 6 applications built using S2I.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-https-s2i"
+ },
+ "labels": {
+ "template": "eap64-https-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "6.4.x",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "kitchensink",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-persistent-s2i.json
new file mode 100644
index 000000000..212431056
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-persistent-s2i.json
@@ -0,0 +1,781 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 MongoDB applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-mongodb-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap64-mongodb-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mongodb-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-s2i.json
new file mode 100644
index 000000000..13fbbdd93
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-s2i.json
@@ -0,0 +1,741 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 MongoDB applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-mongodb-s2i"
+ },
+ "labels": {
+ "template": "eap64-mongodb-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-persistent-s2i.json
new file mode 100644
index 000000000..69fdec206
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-persistent-s2i.json
@@ -0,0 +1,792 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 MySQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap64-mysql-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-mysql"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-s2i.json
new file mode 100644
index 000000000..2bd3c249f
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-s2i.json
@@ -0,0 +1,752 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 MySQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-mysql-s2i"
+ },
+ "labels": {
+ "template": "eap64-mysql-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-mysql"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..31f245950
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-persistent-s2i.json
@@ -0,0 +1,769 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 PostgreSQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap64-postgresql-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-postgresql"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-s2i.json
new file mode 100644
index 000000000..eac964697
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-s2i.json
@@ -0,0 +1,729 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 PostgreSQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-postgresql-s2i"
+ },
+ "labels": {
+ "template": "eap64-postgresql-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-postgresql"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-sso-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-sso-s2i.json
new file mode 100644
index 000000000..09023be71
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-sso-s2i.json
@@ -0,0 +1,756 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass" : "icon-jboss",
+ "description": "Application template for EAP 6 applications built using S2I, enabled for SSO.",
+ "tags": "eap,javaee,java,jboss,xpaas,sso,keycloak",
+ "version": "1.3.2"
+ },
+ "name": "eap64-sso-s2i"
+ },
+ "labels": {
+ "template": "eap64-sso-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Hostname for http service route (e.g. eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "Hostname for https service route (e.g. secure-eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/redhat-developer/redhat-sso-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "7.0.x-ose",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "The URL for the SSO server (e.g. https://secure-sso-myproject.example.com/auth). This is the URL through which the user will be redirected when a login or token is required by the application.",
+ "name": "SSO_URL",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "The URL for the interal SSO service, where secure-sso is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
+ "name": "SSO_SERVICE_URL",
+ "value": "https://secure-sso:8443/auth",
+ "required": false
+ },
+ {
+ "description": "The SSO realm to which the application client(s) should be associated (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "The username used to access the SSO service. This is used to create the appliction client(s) within the specified SSO realm. This should match the SSO_SERVICE_USERNAME specified through one of the sso70-* templates.",
+ "name": "SSO_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the SSO service user.",
+ "name": "SSO_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "SSO Public Key. Public key is recommended to be passed into the template to avoid man-in-the-middle security vulnerability. This can be retrieved from the SSO server, for the specified realm.",
+ "name": "SSO_PUBLIC_KEY",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "SSO Client Access Type",
+ "name": "SSO_BEARER_ONLY",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "app-jee-jsp/target,service-jee-jaxrs/target,app-profile-jee-jsp/target,app-profile-saml-jee-jsp/target",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "SSO_SAML_KEYSTORE_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "SSO_SAML_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "SSO_SAML_CERTIFICATE_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "SSO_SAML_KEYSTORE_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "The SSO Client Secret for Confidential Access",
+ "name": "SSO_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Enable CORS for SSO applications",
+ "name": "SSO_ENABLE_CORS",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "SSO logout page for SAML applications",
+ "name": "SSO_SAML_LOGOUT_PAGE",
+ "value": "/",
+ "required": false
+ },
+ {
+ "description": "If true SSL communication between EAP and the SSO Server will be insecure (i.e. certificate validation is disabled with curl)",
+ "name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
+ "value": "true",
+ "required": false
+ },
+ {
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ },
+ "env": [
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": ""
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "sso-saml-keystore-volume",
+ "mountPath": "/etc/sso-saml-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HOSTNAME_HTTP",
+ "value": "${HOSTNAME_HTTP}"
+ },
+ {
+ "name": "HOSTNAME_HTTPS",
+ "value": "${HOSTNAME_HTTPS}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "SSO_URL",
+ "value": "${SSO_URL}"
+ },
+ {
+ "name": "SSO_SERVICE_URL",
+ "value": "${SSO_SERVICE_URL}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_USERNAME",
+ "value": "${SSO_USERNAME}"
+ },
+ {
+ "name": "SSO_PASSWORD",
+ "value": "${SSO_PASSWORD}"
+ },
+ {
+ "name": "SSO_PUBLIC_KEY",
+ "value": "${SSO_PUBLIC_KEY}"
+ },
+ {
+ "name": "SSO_BEARER_ONLY",
+ "value": "${SSO_BEARER_ONLY}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_SECRET",
+ "value": "${SSO_SAML_KEYSTORE_SECRET}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE",
+ "value": "${SSO_SAML_KEYSTORE}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_DIR",
+ "value": "/etc/sso-saml-secret-volume"
+ },
+ {
+ "name": "SSO_SAML_CERTIFICATE_NAME",
+ "value": "${SSO_SAML_CERTIFICATE_NAME}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_PASSWORD",
+ "value": "${SSO_SAML_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "SSO_SECRET",
+ "value": "${SSO_SECRET}"
+ },
+ {
+ "name": "SSO_ENABLE_CORS",
+ "value": "${SSO_ENABLE_CORS}"
+ },
+ {
+ "name": "SSO_SAML_LOGOUT_PAGE",
+ "value": "${SSO_SAML_LOGOUT_PAGE}"
+ },
+ {
+ "name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
+ "value": "${SSO_DISABLE_SSL_CERTIFICATE_VALIDATION}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "sso-saml-keystore-volume",
+ "secret": {
+ "secretName": "${SSO_SAML_KEYSTORE_SECRET}"
+ }
+ },
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-persistent-s2i.json
new file mode 100644
index 000000000..f08cdf2f9
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-persistent-s2i.json
@@ -0,0 +1,813 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 A-MQ applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-amq-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap70-amq-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "helloworld-mdb",
+ "required": false
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
+ "name": "MQ_JNDI",
+ "value": "java:/ConnectionFactory",
+ "required": false
+ },
+ {
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "HELLOWORLDMDBQueue",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "HELLOWORLDMDBTopic",
+ "required": false
+ },
+ {
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data/kahadb",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-s2i.json
new file mode 100644
index 000000000..3ca9e9fab
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-s2i.json
@@ -0,0 +1,760 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 A-MQ applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-amq-s2i"
+ },
+ "labels": {
+ "template": "eap70-amq-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "helloworld-mdb",
+ "required": false
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
+ "name": "MQ_JNDI",
+ "value": "java:/ConnectionFactory",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "HELLOWORLDMDBQueue",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "HELLOWORLDMDBTopic",
+ "required": false
+ },
+ {
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-basic-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-basic-s2i.json
new file mode 100644
index 000000000..83b4d5b24
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-basic-s2i.json
@@ -0,0 +1,351 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 7 applications built using S2I.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-basic-s2i"
+ },
+ "labels": {
+ "template": "eap70-basic-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "7.0.0.GA",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "kitchensink",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-https-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-https-s2i.json
new file mode 100644
index 000000000..1292442a4
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-https-s2i.json
@@ -0,0 +1,536 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 7 applications built using S2I.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-https-s2i"
+ },
+ "labels": {
+ "template": "eap70-https-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "7.0.0.GA",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "kitchensink",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-persistent-s2i.json
new file mode 100644
index 000000000..99db77d58
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-persistent-s2i.json
@@ -0,0 +1,792 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 MongoDB applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-mongodb-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap70-mongodb-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mongodb-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-s2i.json
new file mode 100644
index 000000000..c8150c231
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-s2i.json
@@ -0,0 +1,752 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 MongoDB applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-mongodb-s2i"
+ },
+ "labels": {
+ "template": "eap70-mongodb-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-persistent-s2i.json
new file mode 100644
index 000000000..f8e5c2b04
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-persistent-s2i.json
@@ -0,0 +1,807 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 MySQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap70-mysql-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-mysql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-mysql"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-s2i.json
new file mode 100644
index 000000000..1edeb62e7
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-s2i.json
@@ -0,0 +1,767 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 MySQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-mysql-s2i"
+ },
+ "labels": {
+ "template": "eap70-mysql-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-mysql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-mysql"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..d11df06ee
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-persistent-s2i.json
@@ -0,0 +1,784 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 PostgreSQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap70-postgresql-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-postgresql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-postgresql"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-s2i.json
new file mode 100644
index 000000000..6b7f6d707
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-s2i.json
@@ -0,0 +1,744 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 PostgreSQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-postgresql-s2i"
+ },
+ "labels": {
+ "template": "eap70-postgresql-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-postgresql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-postgresql"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-sso-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-sso-s2i.json
new file mode 100644
index 000000000..811602220
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-sso-s2i.json
@@ -0,0 +1,767 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass" : "icon-jboss",
+ "description": "Application template for EAP 6 applications built using S2I, enabled for SSO.",
+ "tags": "eap,javaee,java,jboss,xpaas,sso,keycloak",
+ "version": "1.3.2"
+ },
+ "name": "eap70-sso-s2i"
+ },
+ "labels": {
+ "template": "eap70-sso-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Hostname for http service route (e.g. eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "Hostname for https service route (e.g. secure-eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/redhat-developer/redhat-sso-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "7.0.x-ose",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "The URL for the SSO server (e.g. https://secure-sso-myproject.example.com/auth). This is the URL through which the user will be redirected when a login or token is required by the application.",
+ "name": "SSO_URL",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "The URL for the interal SSO service, where secure-sso (the default) is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
+ "name": "SSO_SERVICE_URL",
+ "value": "https://secure-sso:8443/auth",
+ "required": false
+ },
+ {
+ "description": "The SSO realm to which the application client(s) should be associated (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "The username used to access the SSO service. This is used to create the appliction client(s) within the specified SSO realm. This should match the SSO_SERVICE_USERNAME specified through one of the sso70-* templates.",
+ "name": "SSO_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the SSO service user.",
+ "name": "SSO_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "SSO Public Key. Public key is recommended to be passed into the template to avoid man-in-the-middle security vulnerability",
+ "name": "SSO_PUBLIC_KEY",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "SSO Client Access Type",
+ "name": "SSO_BEARER_ONLY",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "app-jee-jsp/target,service-jee-jaxrs/target,app-profile-jee-jsp/target,app-profile-saml-jee-jsp/target",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "SSO_SAML_KEYSTORE_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "SSO_SAML_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "SSO_SAML_CERTIFICATE_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "SSO_SAML_KEYSTORE_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "The SSO Client Secret for Confidential Access",
+ "name": "SSO_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Enable CORS for SSO applications",
+ "name": "SSO_ENABLE_CORS",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "SSO logout page for SAML applications",
+ "name": "SSO_SAML_LOGOUT_PAGE",
+ "value": "/",
+ "required": false
+ },
+ {
+ "description": "If true SSL communication between EAP and the SSO Server will be insecure (i.e. certificate validation is disabled with curl)",
+ "name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
+ "value": "true",
+ "required": false
+ },
+ {
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ },
+ "env": [
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": ""
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "sso-saml-keystore-volume",
+ "mountPath": "/etc/sso-saml-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HOSTNAME_HTTP",
+ "value": "${HOSTNAME_HTTP}"
+ },
+ {
+ "name": "HOSTNAME_HTTPS",
+ "value": "${HOSTNAME_HTTPS}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "SSO_URL",
+ "value": "${SSO_URL}"
+ },
+ {
+ "name": "SSO_SERVICE_URL",
+ "value": "${SSO_SERVICE_URL}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_USERNAME",
+ "value": "${SSO_USERNAME}"
+ },
+ {
+ "name": "SSO_PASSWORD",
+ "value": "${SSO_PASSWORD}"
+ },
+ {
+ "name": "SSO_PUBLIC_KEY",
+ "value": "${SSO_PUBLIC_KEY}"
+ },
+ {
+ "name": "SSO_BEARER_ONLY",
+ "value": "${SSO_BEARER_ONLY}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_SECRET",
+ "value": "${SSO_SAML_KEYSTORE_SECRET}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE",
+ "value": "${SSO_SAML_KEYSTORE}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_DIR",
+ "value": "/etc/sso-saml-secret-volume"
+ },
+ {
+ "name": "SSO_SAML_CERTIFICATE_NAME",
+ "value": "${SSO_SAML_CERTIFICATE_NAME}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_PASSWORD",
+ "value": "${SSO_SAML_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "SSO_SECRET",
+ "value": "${SSO_SECRET}"
+ },
+ {
+ "name": "SSO_ENABLE_CORS",
+ "value": "${SSO_ENABLE_CORS}"
+ },
+ {
+ "name": "SSO_SAML_LOGOUT_PAGE",
+ "value": "${SSO_SAML_LOGOUT_PAGE}"
+ },
+ {
+ "name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
+ "value": "${SSO_DISABLE_SSL_CERTIFICATE_VALIDATION}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "sso-saml-keystore-volume",
+ "secret": {
+ "secretName": "${SSO_SAML_KEYSTORE_SECRET}"
+ }
+ },
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-basic-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-basic-s2i.json
new file mode 100644
index 000000000..413a6de87
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-basic-s2i.json
@@ -0,0 +1,284 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat7-basic-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-basic-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-https-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-https-s2i.json
new file mode 100644
index 000000000..610ea9441
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-https-s2i.json
@@ -0,0 +1,398 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat7-https-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-https-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
new file mode 100644
index 000000000..6ef9d6e4c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
@@ -0,0 +1,654 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,mongodb,java,database,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat7-mongodb-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-mongodb-persistent-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mongodb-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
new file mode 100644
index 000000000..9b48f8ae7
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
@@ -0,0 +1,614 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications built using S2I.",
+ "tags": "tomcat,tomcat7,mongodb,java,database,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat7-mongodb-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-mongodb-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
new file mode 100644
index 000000000..30af703ce
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
@@ -0,0 +1,656 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,mysql,java,database,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat7-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-mysql-persistent-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json
new file mode 100644
index 000000000..c2843af63
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json
@@ -0,0 +1,616 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications built using S2I.",
+ "tags": "tomcat,tomcat7,mysql,java,database,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat7-mysql-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-mysql-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..b8372f374
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
@@ -0,0 +1,633 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,postgresql,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "jws30-tomcat7-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-postgresql-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
new file mode 100644
index 000000000..cd5bb9fa4
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
@@ -0,0 +1,593 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications built using S2I.",
+ "tags": "tomcat,tomcat7,postgresql,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "jws30-tomcat7-postgresql-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-postgresql-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-basic-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-basic-s2i.json
new file mode 100644
index 000000000..cb1e49d29
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-basic-s2i.json
@@ -0,0 +1,284 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat8-basic-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-basic-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-https-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-https-s2i.json
new file mode 100644
index 000000000..21d5662c7
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-https-s2i.json
@@ -0,0 +1,398 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat8-https-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-https-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
new file mode 100644
index 000000000..34657d826
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
@@ -0,0 +1,654 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,mongodb,java,database,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat8-mongodb-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-mongodb-persistent-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mongodb-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
new file mode 100644
index 000000000..974cfaddb
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
@@ -0,0 +1,614 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications built using S2I.",
+ "tags": "tomcat,tomcat8,mongodb,java,database,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat8-mongodb-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-mongodb-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
new file mode 100644
index 000000000..7a8231cc5
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
@@ -0,0 +1,656 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,mysql,java,database,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat8-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-mysql-persistent-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json
new file mode 100644
index 000000000..cda21f237
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json
@@ -0,0 +1,616 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications built using S2I.",
+ "tags": "tomcat,tomcat8,mysql,java,database,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat8-mysql-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-mysql-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..4dfc98015
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
@@ -0,0 +1,633 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,postgresql,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "jws30-tomcat8-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-postgresql-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
new file mode 100644
index 000000000..f6c85668c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
@@ -0,0 +1,591 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications built using S2I.",
+ "tags": "tomcat,tomcat8,postgresql,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "jws30-tomcat8-postgresql-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-postgresql-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-amq-template.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-amq-template.json
new file mode 100644
index 000000000..cd0bec3c1
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-amq-template.json
@@ -0,0 +1,362 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Camel route using ActiveMQ in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-amq"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-amq"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-amq",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-amq.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-amq-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-amq",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "displayName": "ActiveMQ Broker Service",
+ "required": true,
+ "value": "broker-amq-tcp",
+ "description": "Set this to the name of the TCP service of the ActiveMQ broker. You may need to create a broker first."
+ },
+ {
+ "name": "ACTIVEMQ_USERNAME",
+ "displayName": "ActiveMQ Broker Username",
+ "description": "The username used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "ACTIVEMQ_PASSWORD",
+ "displayName": "ActiveMQ Broker Password",
+ "description": "The password used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "value": "${ACTIVEMQ_SERVICE_NAME}"
+ }, {
+ "name": "ACTIVEMQ_USERNAME",
+ "value": "${ACTIVEMQ_USERNAME}"
+ }, {
+ "name": "ACTIVEMQ_PASSWORD",
+ "value": "${ACTIVEMQ_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-log-template.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-log-template.json
new file mode 100644
index 000000000..2ecce08a9
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-log-template.json
@@ -0,0 +1,336 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "A simple Camel route in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-log"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-log"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-log",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-log.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-log-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-log",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-rest-sql-template.json
new file mode 100644
index 000000000..d80939efb
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-rest-sql-template.json
@@ -0,0 +1,421 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Camel example using Rest DSL with SQL Database in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-camel-rest-sql"
+ },
+ "labels": {
+ "template": "s2i-karaf2-camel-rest-sql"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-camel-rest-sql",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-camel-rest-sql.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-camel-rest-sql-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-camel-rest",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "MYSQL_SERVICE_NAME",
+ "displayName": "MySQL Server Service",
+ "required": true,
+ "value": "mysql",
+ "description": "Set this to the name of the TCP service of the MySQL server. You may need to create a server first."
+ },
+ {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "displayName": "MySQL Server Database",
+ "required": true,
+ "value": "sampledb",
+ "description": "The database hosted by the MySQL server to be used by the application."
+ },
+ {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "displayName": "MySQL Server Username",
+ "description": "The username used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "displayName": "MySQL Server Password",
+ "description": "The password used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9411,
+ "protocol": "TCP",
+ "targetPort": 8181
+ }
+ ],
+ "selector": {
+ "container": "karaf",
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "MYSQL_SERVICE_NAME",
+ "value": "${MYSQL_SERVICE_NAME}"
+ }, {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "value": "${MYSQL_SERVICE_DATABASE}"
+ }, {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "value": "${MYSQL_SERVICE_USERNAME}"
+ }, {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "value": "${MYSQL_SERVICE_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-cxf-rest-template.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-cxf-rest-template.json
new file mode 100644
index 000000000..f99099868
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-cxf-rest-template.json
@@ -0,0 +1,385 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "REST example using CXF in Karaf container.",
+ "tags": "quickstart,java,karaf,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-karaf2-cxf-rest"
+ },
+ "labels": {
+ "template": "s2i-karaf2-cxf-rest"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-karaf2-cxf-rest",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "value": "https://github.com/fabric8-quickstarts/karaf2-cxf-rest.git",
+ "required": true,
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "karaf2-cxf-rest-1.0.0.redhat-000010",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "s2i-karaf2-cxf-rest",
+ "description": "Exposed Service name."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000010",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "install -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "MEMORY_REQUEST",
+ "displayName": "Memory request",
+ "value": "1.5G",
+ "required": true,
+ "description": "The amount of memory required for the container to run."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory limit",
+ "value": "2G",
+ "required": true,
+ "description": "The amount of memory the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "container": "java",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "container": "karaf",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9412,
+ "protocol": "TCP",
+ "targetPort": 8181
+ }
+ ],
+ "selector": {
+ "container": "karaf",
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-karaf-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "container": "karaf",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/readiness-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health-check",
+ "port" : 8181
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8181,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}",
+ "memory": "${MEMORY_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}",
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/openjdk18-web-basic-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/openjdk18-web-basic-s2i.json
new file mode 100644
index 000000000..143e16756
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/openjdk18-web-basic-s2i.json
@@ -0,0 +1,267 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for Java applications built using S2I.",
+ "tags": "java,xpaas",
+ "version": "1.0.0"
+ },
+ "name": "openjdk18-web-basic-s2i"
+ },
+ "labels": {
+ "template": "openjdk18-web-basic-s2i",
+ "xpaas": "1.0.0"
+ },
+ "message": "A new java application has been created in your project.",
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "displayName": "Application Name",
+ "name": "APPLICATION_NAME",
+ "value": "openjdk-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom http Route Hostname",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "displayName": "Git Repository URL",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "displayName": "Git Reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "displayName": "Context Directory",
+ "name": "CONTEXT_DIR",
+ "value": "undertow-servlet",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "displayName": "Github Webhook Secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "displayName": "Generic Webhook Secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "displayName": "ImageStream Namespace",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The application's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-openjdk18-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "env": [
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json
new file mode 100644
index 000000000..1dea463ac
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json
@@ -0,0 +1,1079 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and MySQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,amq,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "processserver63-amq-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver63-amq-mysql-persistent-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB,${APPLICATION_NAME}-mysql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-s2i.json
new file mode 100644
index 000000000..42264585b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-s2i.json
@@ -0,0 +1,959 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and MySQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,amq,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "processserver63-amq-mysql-s2i"
+ },
+ "labels": {
+ "template": "processserver63-amq-mysql-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..f6d0c99ed
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json
@@ -0,0 +1,1052 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and PostgreSQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,amq,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "processserver63-amq-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver63-amq-postgresql-persistent-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB,${APPLICATION_NAME}-postgresql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-s2i.json
new file mode 100644
index 000000000..41c726cf0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-s2i.json
@@ -0,0 +1,932 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and PostgreSQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,amq,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "processserver63-amq-postgresql-s2i"
+ },
+ "labels": {
+ "template": "processserver63-amq-postgresql-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-basic-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-basic-s2i.json
new file mode 100644
index 000000000..170c919cb
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-basic-s2i.json
@@ -0,0 +1,345 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,javaee,java,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "processserver63-basic-s2i"
+ },
+ "labels": {
+ "template": "processserver63-basic-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.H2Dialect",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-persistent-s2i.json
new file mode 100644
index 000000000..89d0db1a6
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-persistent-s2i.json
@@ -0,0 +1,792 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server MySQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "processserver63-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver63-mysql-persistent-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB,${APPLICATION_NAME}-mysql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-s2i.json
new file mode 100644
index 000000000..26cab29f8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-s2i.json
@@ -0,0 +1,716 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server MySQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "processserver63-mysql-s2i"
+ },
+ "labels": {
+ "template": "processserver63-mysql-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..32a512829
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json
@@ -0,0 +1,765 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server PostgreSQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "processserver63-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver63-postgresql-persistent-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB,${APPLICATION_NAME}-postgresql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-s2i.json
new file mode 100644
index 000000000..55e2199bb
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-s2i.json
@@ -0,0 +1,689 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server PostgreSQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "processserver63-postgresql-s2i"
+ },
+ "labels": {
+ "template": "processserver63-postgresql-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-amq-template.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-amq-template.json
new file mode 100644
index 000000000..8b3cd6ed0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-amq-template.json
@@ -0,0 +1,331 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot, Camel and ActiveMQ QuickStart. This quickstart demonstrates how to connect a Spring-Boot application to an ActiveMQ broker and use JMS messaging between two Camel routes using OpenShift. In this example we will use two containers, one container to run as a ActiveMQ broker, and another as a client to the broker, where the Camel routes are running. This quickstart requires the ActiveMQ broker has been deployed and running first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-amq"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-amq"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-amq",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-amq.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-amq-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "displayName": "ActiveMQ Broker Service",
+ "required": true,
+ "value": "broker-amq-tcp",
+ "description": "Set this to the name of the TCP service of the ActiveMQ broker. You may need to create a broker first."
+ },
+ {
+ "name": "ACTIVEMQ_BROKER_USERNAME",
+ "displayName": "ActiveMQ Broker Username",
+ "description": "The username used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "ACTIVEMQ_BROKER_PASSWORD",
+ "displayName": "ActiveMQ Broker Password",
+ "description": "The password used to authenticate with the ActiveMQ broker. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "ACTIVEMQ_SERVICE_NAME",
+ "value": "${ACTIVEMQ_SERVICE_NAME}"
+ }, {
+ "name": "ACTIVEMQ_BROKER_USERNAME",
+ "value": "${ACTIVEMQ_BROKER_USERNAME}"
+ }, {
+ "name": "ACTIVEMQ_BROKER_PASSWORD",
+ "value": "${ACTIVEMQ_BROKER_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-config-template.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-config-template.json
new file mode 100644
index 000000000..bc5bbad22
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-config-template.json
@@ -0,0 +1,327 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot and Camel using ConfigMaps and Secrets. This quickstart demonstrates how to configure a Spring-Boot application using Openshift ConfigMaps and Secrets.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-config"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-config"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-config",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-config.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-config-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_ACCOUNT_NAME",
+ "displayName": "Service Account",
+ "value": "qs-camel-config",
+ "required": true,
+ "description": "The Service Account that will be used to run the container. It must be already present in Openshift and have the view role."
+ },
+ {
+ "name": "SECRET_NAME",
+ "displayName": "Secret Name",
+ "value": "camel-config",
+ "required": true,
+ "description": "The name of the Openshift Secret that will be used to configure the application. It must be already present in Openshift."
+ },
+ {
+ "name": "CONFIGMAP_NAME",
+ "displayName": "ConfigMap Name",
+ "value": "camel-config",
+ "required": true,
+ "description": "The name of the Openshift ConfigMap that will be used to configure the application. It must be already present in Openshift."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "volumes": [
+ {
+ "name": "camel-config",
+ "secret": {
+ "secretName": "${SECRET_NAME}"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "SPRING_CLOUD_KUBERNETES_SECRETS_NAME",
+ "value": "${SECRET_NAME}"
+ }, {
+ "name": "SPRING_CLOUD_KUBERNETES_CONFIG_NAME",
+ "value": "${CONFIGMAP_NAME}"
+ } ],
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "camel-config",
+ "readOnly": true,
+ "mountPath": "/etc/secrets/camel-config"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-drools-template.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-drools-template.json
new file mode 100644
index 000000000..e54fa0d59
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-drools-template.json
@@ -0,0 +1,334 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot, Camel and JBoss BRMS QuickStart. This example demonstrates how you can use Apache Camel and JBoss BRMS with Spring Boot on OpenShift. DRL files contain simple rules which are used to create knowledge session via Spring configuration file. Camel routes, defined via Spring as well, are then used to e.g. pass (insert) the Body of the message as a POJO to Drools engine for execution.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-drools"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-drools"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-drools",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-drools.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-drools-1.0.0.redhat-000054",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "KIESERVER_SERVICE",
+ "displayName": "Decision Server Name",
+ "required": true,
+ "value": "kie-app",
+ "description": "Set this to the name of the Decision Server. You may need to create an instance before."
+ },
+ {
+ "name": "KIESERVER_USERNAME",
+ "displayName": "Decision Server Username",
+ "required": true,
+ "value": "kieserver",
+ "description": "The username used to authenticate with the Decision Server."
+ },
+ {
+ "name": "KIESERVER_PASSWORD",
+ "displayName": "Decision Server Password",
+ "required": true,
+ "description": "The password used to authenticate with the Decision Server."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000054",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "KIESERVER_SERVICE",
+ "value": "${KIESERVER_SERVICE}"
+ }, {
+ "name": "KIESERVER_USERNAME",
+ "value": "${KIESERVER_USERNAME}"
+ }, {
+ "name": "KIESERVER_PASSWORD",
+ "value": "${KIESERVER_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-infinispan-template.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-infinispan-template.json
new file mode 100644
index 000000000..20ba97dac
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-infinispan-template.json
@@ -0,0 +1,315 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot, Camel and JBoss Data Grid QuickStart. This quickstart demonstrates how to connect a Spring-Boot application to a JBoss Data Grid (or Infinispan) server using the Hot Rod protocol. It requires that the data grid server (or cluster) has been deployed first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-infinispan"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-infinispan"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-infinispan",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-infinispan.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-infinispan-1.0.0.redhat-000024",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "INFINISPAN_SERVICE",
+ "displayName": "JBoss Data Grid Service (Hot Rod)",
+ "required": true,
+ "value": "datagrid-app-hotrod",
+ "description": "Set this to the name of the Hot Rod service of the JBoss Data Grid. You may need to create the data grid first."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000024",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "INFINISPAN_SERVICE",
+ "value": "${INFINISPAN_SERVICE}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-rest-sql-template.json
new file mode 100644
index 000000000..555647fab
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-rest-sql-template.json
@@ -0,0 +1,403 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring Boot, Camel REST DSL and MySQL QuickStart. This quickstart demonstrates how to connect a Spring Boot application to a MySQL database and expose a REST API with Camel on OpenShift. In this example we will use two containers, one container to run as a MySQL server, and another as a client to the database, where the Camel routes are running. This quickstart requires the MySQL server to be deployed and started first.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-rest-sql"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-rest-sql"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-rest-sql",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-rest-sql.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-rest-sql-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "camel-rest-sql",
+ "description": "Exposed service name."
+ },
+ {
+ "name": "MYSQL_SERVICE_NAME",
+ "displayName": "MySQL Server Service",
+ "required": true,
+ "value": "mysql",
+ "description": "Set this to the name of the TCP service of the MySQL server. You may need to create a server first."
+ },
+ {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "displayName": "MySQL Server Database",
+ "value": "sampledb",
+ "description": "The database hosted by the MySQL server to be used by the application."
+ },
+ {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "displayName": "MySQL Server Username",
+ "description": "The username used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "displayName": "MySQL Server Password",
+ "description": "The password used to authenticate with the MySQL server. Leave it empty if authentication is disabled."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9411,
+ "protocol": "TCP",
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8080,
+ "name": "http"
+ },
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } , {
+ "name": "MYSQL_SERVICE_NAME",
+ "value": "${MYSQL_SERVICE_NAME}"
+ }, {
+ "name": "MYSQL_SERVICE_DATABASE",
+ "value": "${MYSQL_SERVICE_DATABASE}"
+ }, {
+ "name": "MYSQL_SERVICE_USERNAME",
+ "value": "${MYSQL_SERVICE_USERNAME}"
+ }, {
+ "name": "MYSQL_SERVICE_PASSWORD",
+ "value": "${MYSQL_SERVICE_PASSWORD}"
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-teiid-template.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-teiid-template.json
new file mode 100644
index 000000000..cf9a4e903
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-teiid-template.json
@@ -0,0 +1,343 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot, Camel and JBoss Data Virtualization QuickStart. This example demonstrates how to connect Apache Camel to a remote JBoss Data Virtualization (or Teiid) Server using the JDBC protocol.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-teiid"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-teiid"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-teiid",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-teiid.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-teiid-1.0.0.redhat-000053",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "TEIID_SERVICE_NAME",
+ "displayName": "JDV Server Name",
+ "required": true,
+ "value": "datavirt-app",
+ "description": "Set this to the name of the JDV Server. You may need to create an instance before."
+ },
+ {
+ "name": "TEIID_PORT_NAME",
+ "displayName": "JDV Port Name",
+ "value": "jdbc",
+ "description": "Set this to the name of the JDV port to use. Set this value if the JDV service contains multiple named ports."
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "displayName": "JDV Server Username",
+ "required": true,
+ "description": "The username used to authenticate with the JDV Server."
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "displayName": "JDV Server Password",
+ "required": true,
+ "description": "The password used to authenticate with the JDV Server."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000053",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [
+ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ }, {
+ "name": "TEIID_SERVICE_NAME",
+ "value": "${TEIID_SERVICE_NAME}"
+ }, {
+ "name": "TEIID_PORT_NAME",
+ "value": "${TEIID_PORT_NAME}"
+ }, {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ }, {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ }],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-template.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-template.json
new file mode 100644
index 000000000..c78a96f7c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and Camel QuickStart. This example demonstrates how you can use Apache Camel with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a Camel route that triggeres a message every 5th second, and routes the message to a log.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-xml-template.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-xml-template.json
new file mode 100644
index 000000000..620425902
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-xml-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and Camel Xml QuickStart. This example demonstrates how you can use Apache Camel with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a Camel route (in Spring xml) that triggeres a message every 5th second, and routes the message to a log.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-camel-xml"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-camel-xml"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-camel-xml",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-camel-xml.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-camel-xml-1.0.0.redhat-000055",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000055",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json
new file mode 100644
index 000000000..15cfc93fd
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and CXF JAXRS QuickStart. This example demonstrates how you can use Apache CXF JAXRS with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a CXF JAXRS endpoint with Swagger enabled.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-cxf-jaxrs"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-cxf-jaxrs"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-cxf-jaxrs",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-cxf-jaxrs.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-cxf-jaxrs-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxws-template.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxws-template.json
new file mode 100644
index 000000000..c70ee7726
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxws-template.json
@@ -0,0 +1,305 @@
+{
+ "apiVersion": "v1",
+ "kind": "Template",
+ "metadata": {
+ "annotations": {
+ "description": "Spring-Boot and CXF JAXWS QuickStart. This example demonstrates how you can use Apache CXF JAXWS with Spring Boot on Openshift. The quickstart uses Spring Boot to configure a little application that includes a CXF JAXWS endpoint.",
+ "tags": "quickstart,java,springboot,fis",
+ "iconClass": "icon-jboss",
+ "version": "2.0"
+ },
+ "name": "s2i-spring-boot-cxf-jaxws"
+ },
+ "labels": {
+ "template": "s2i-spring-boot-cxf-jaxws"
+ },
+ "parameters": [
+ {
+ "name": "APP_NAME",
+ "displayName": "Application Name",
+ "required": true,
+ "value": "s2i-spring-boot-cxf-jaxws",
+ "description": "The name assigned to the application."
+ },
+ {
+ "name": "GIT_REPO",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "value": "https://github.com/fabric8-quickstarts/spring-boot-cxf-jaxws.git",
+ "description": "The URL of the repository with your application source code."
+ },
+ {
+ "name": "GIT_REF",
+ "displayName": "Git Reference",
+ "value": "spring-boot-cxf-jaxws-1.0.0.redhat-000005",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "BUILDER_VERSION",
+ "displayName": "Builder version",
+ "value": "2.0",
+ "description": "The version of the FIS S2I builder image to use."
+ },
+ {
+ "name": "APP_VERSION",
+ "displayName": "Application Version",
+ "value": "1.0.0.redhat-000005",
+ "description": "The application version."
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "displayName": "Maven Arguments",
+ "value": "package -DskipTests -Dfabric8.skip -e -B",
+ "description": "Arguments passed to mvn in the build."
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "displayName": "Extra Maven Arguments",
+ "description": "Extra arguments passed to mvn, e.g. for multi-module builds."
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "displayName": "Maven build directory",
+ "description": "Directory of the artifact to be built, e.g. for multi-module builds."
+ },
+ {
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "displayName": "Image Stream Namespace",
+ "value": "openshift",
+ "required": true,
+ "description": "Namespace in which the Fuse ImageStreams are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project."
+ },
+ {
+ "name": "BUILD_SECRET",
+ "displayName": "Git Build Secret",
+ "generate": "expression",
+ "description": "The secret needed to trigger a build.",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CPU_REQUEST",
+ "displayName": "CPU request",
+ "value": "0.2",
+ "required": true,
+ "description": "The amount of CPU to requests."
+ },
+ {
+ "name": "CPU_LIMIT",
+ "displayName": "CPU limit",
+ "value": "1.0",
+ "required": true,
+ "description": "The amount of CPU the container is limited to use."
+ }
+ ],
+ "objects": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {},
+ "status": {
+ "dockerImageRepository": ""
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${BUILD_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ }
+ ],
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${GIT_REPO}",
+ "ref": "${GIT_REF}"
+ }
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "fis-java-openshift:${BUILDER_VERSION}"
+ },
+ "forcePull": true,
+ "incremental": true,
+ "env": [
+ {
+ "name": "BUILD_LOGLEVEL",
+ "value": "5"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS",
+ "value": "${MAVEN_ARGS}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": "${MAVEN_ARGS_APPEND}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ },
+ "resources": {}
+ },
+ "status": {
+ "lastVersion": 0
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APP_NAME}",
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APP_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APP_NAME}:latest"
+ }
+ }
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "${APP_NAME}",
+ "deploymentconfig": "${APP_NAME}",
+ "group": "quickstarts",
+ "project": "${APP_NAME}",
+ "provider": "s2i",
+ "version": "${APP_VERSION}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APP_NAME}",
+ "image": "library/${APP_NAME}:latest",
+ "readinessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 10
+ },
+ "livenessProbe" : {
+ "httpGet" : {
+ "path" : "/health",
+ "port" : 8081
+ },
+ "initialDelaySeconds" : 180
+ },
+ "ports": [
+ {
+ "containerPort": 8778,
+ "name": "jolokia"
+ }
+ ],
+ "env" : [ {
+ "name" : "KUBERNETES_NAMESPACE",
+ "valueFrom" : {
+ "fieldRef" : {
+ "fieldPath" : "metadata.namespace"
+ }
+ }
+ } ],
+ "resources": {
+ "requests": {
+ "cpu": "${CPU_REQUEST}"
+ },
+ "limits": {
+ "cpu": "${CPU_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "status": {}
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-https.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-https.json
new file mode 100644
index 000000000..fb0578a67
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-https.json
@@ -0,0 +1,514 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.0",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,java,jboss,xpaas",
+ "version" : "1.3.2"
+ },
+ "name": "sso70-https"
+ },
+ "labels": {
+ "template": "sso70-https",
+ "xpaas" : "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso70-openshift:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql-persistent.json
new file mode 100644
index 000000000..dcbb24bf1
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql-persistent.json
@@ -0,0 +1,750 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.0 MySQL applications with persistent storage",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,mysql,java,database,jboss,xpaas",
+ "version" : "1.3.2"
+ },
+ "name": "sso70-mysql-persistent"
+ },
+ "labels": {
+ "template": "sso70-mysql-persistent",
+ "xpaas" : "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso70-openshift:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql.json
new file mode 100644
index 000000000..1768f7a1b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql.json
@@ -0,0 +1,719 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.0 MySQL applications",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,mysql,java,database,jboss,xpaas",
+ "version" : "1.3.2"
+ },
+ "name": "sso70-mysql"
+ },
+ "labels": {
+ "template": "sso70-mysql",
+ "xpaas" : "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso70-openshift:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql-persistent.json
new file mode 100644
index 000000000..4c2f81f2e
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql-persistent.json
@@ -0,0 +1,727 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.0 PostgreSQL applications with persistent storage",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,postrgresql,java,database,jboss,xpaas",
+ "version" : "1.3.2"
+ },
+ "name": "sso70-postgresql-persistent"
+ },
+ "labels": {
+ "template": "sso70-postgresql-persistent",
+ "xpaas" : "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso70-openshift:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql.json b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql.json
new file mode 100644
index 000000000..d8402ef72
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql.json
@@ -0,0 +1,696 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.0 PostgreSQL applications",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,postrgresql,java,database,jboss,xpaas",
+ "version" : "1.3.2"
+ },
+ "name": "sso70-postgresql"
+ },
+ "labels": {
+ "template": "sso70-postgresql",
+ "xpaas" : "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso70-openshift:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_excluder/README.md b/roles/openshift_excluder/README.md
index e76a15952..e048bd107 100644
--- a/roles/openshift_excluder/README.md
+++ b/roles/openshift_excluder/README.md
@@ -18,8 +18,6 @@ Facts
| enable_docker_excluder | enable_excluders | Enable docker excluder. If not set, the docker excluder is ignored. |
| enable_openshift_excluder | enable_excluders | Enable openshift excluder. If not set, the openshift excluder is ignored. |
| enable_excluders | None | Enable all excluders
-| enable_docker_excluder_override | None | indication the docker excluder needs to be enabled |
-| disable_openshift_excluder_override | None | indication the openshift excluder needs to be disabled |
Role Variables
--------------
diff --git a/roles/openshift_excluder/tasks/adjust.yml b/roles/openshift_excluder/tasks/adjust.yml
deleted file mode 100644
index 2535b9ea6..000000000
--- a/roles/openshift_excluder/tasks/adjust.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-# Depending on enablement of individual excluders and their status
-# some excluders needs to be disabled, resp. enabled
-# By default, all excluders are disabled unless overrided.
-- block:
- - include: init.yml
- # All excluders that are to be enabled are enabled
- - include: exclude.yml
- vars:
- # Enable the docker excluder only if it is overrided
- enable_docker_excluder: "{{ enable_docker_excluder_override | default(false) | bool }}"
- # excluder is to be disabled by default
- enable_openshift_excluder: false
- # All excluders that are to be disabled are disabled
- - include: unexclude.yml
- vars:
- # If the docker override is not set, default to the generic behaviour
- disable_docker_excluder: "{{ not enable_docker_excluder_override | default(not docker_excluder_on) | bool }}"
- # disable openshift excluder is never overrided to be enabled
- # disable it if the docker excluder is enabled
- disable_openshift_excluder: "{{ openshift_excluder_on | bool }}"
- when:
- - not openshift.common.is_atomic | bool
diff --git a/roles/openshift_excluder/tasks/disable.yml b/roles/openshift_excluder/tasks/disable.yml
index a8deb3eb1..97044fff6 100644
--- a/roles/openshift_excluder/tasks/disable.yml
+++ b/roles/openshift_excluder/tasks/disable.yml
@@ -1,11 +1,17 @@
---
# input variables
-# - with_status_check
-# - with_install
# - excluder_package_state
# - docker_excluder_package_state
- include: init.yml
+# unexclude the current openshift/origin-excluder if it is installed so it can be updated
+- include: unexclude.yml
+ vars:
+ unexclude_docker_excluder: false
+ unexclude_openshift_excluder: "{{ openshift_excluder_on | bool }}"
+ when:
+ - not openshift.common.is_atomic | bool
+
# Install any excluder that is enabled
- include: install.yml
vars:
@@ -18,9 +24,24 @@
# it the docker excluder is enabled, we install it and in case its status is non-zero
# it is enabled no matter what
-# Check the current state of all excluders
-- include: status.yml
- when: with_status_check | default(docker_excluder_on or openshift_excluder_on) | bool
-
- # And finally adjust an excluder in order to update host components correctly
-- include: adjust.yml
+# And finally adjust an excluder in order to update host components correctly. First
+# exclude then unexclude
+- block:
+ - include: exclude.yml
+ vars:
+ # Enable the docker excluder only if it is overrided
+ # BZ #1430612: docker excluders should be enabled even during installation and upgrade
+ exclude_docker_excluder: "{{ docker_excluder_on | bool }}"
+ # excluder is to be disabled by default
+ exclude_openshift_excluder: false
+ # All excluders that are to be disabled are disabled
+ - include: unexclude.yml
+ vars:
+ # If the docker override is not set, default to the generic behaviour
+ # BZ #1430612: docker excluders should be enabled even during installation and upgrade
+ unexclude_docker_excluder: false
+ # disable openshift excluder is never overrided to be enabled
+ # disable it if the docker excluder is enabled
+ unexclude_openshift_excluder: "{{ openshift_excluder_on | bool }}"
+ when:
+ - not openshift.common.is_atomic | bool
diff --git a/roles/openshift_excluder/tasks/enable.yml b/roles/openshift_excluder/tasks/enable.yml
index 413c7b5cf..e719325bc 100644
--- a/roles/openshift_excluder/tasks/enable.yml
+++ b/roles/openshift_excluder/tasks/enable.yml
@@ -1,6 +1,5 @@
---
# input variables:
-# - with_install
- block:
- include: init.yml
@@ -8,14 +7,12 @@
vars:
install_docker_excluder: "{{ docker_excluder_on | bool }}"
install_openshift_excluder: "{{ openshift_excluder_on | bool }}"
- when: with_install | default(docker_excluder_on or openshift_excluder_on) | bool
+ when: docker_excluder_on or openshift_excluder_on | bool
- include: exclude.yml
vars:
- # Enable the docker excluder only if it is overrided, resp. enabled by default (in that order)
- enable_docker_excluder: "{{ enable_docker_excluder_override | default(docker_excluder_on) | bool }}"
- # Enable the openshift excluder only if it is not overrided, resp. enabled by default (in that order)
- enable_openshift_excluder: "{{ not disable_openshift_excluder_override | default(not openshift_excluder_on) | bool }}"
+ exclude_docker_excluder: "{{ docker_excluder_on | bool }}"
+ exclude_openshift_excluder: "{{ openshift_excluder_on | bool }}"
when:
- not openshift.common.is_atomic | bool
diff --git a/roles/openshift_excluder/tasks/exclude.yml b/roles/openshift_excluder/tasks/exclude.yml
index af9824aae..ca18d343f 100644
--- a/roles/openshift_excluder/tasks/exclude.yml
+++ b/roles/openshift_excluder/tasks/exclude.yml
@@ -1,20 +1,30 @@
---
# input variables:
-# - enable_docker_excluder
-# - enable_openshift_excluder
+# - exclude_docker_excluder
+# - exclude_openshift_excluder
- block:
+
+ - name: Check for docker-excluder
+ stat:
+ path: /sbin/{{ openshift.common.service_type }}-docker-excluder
+ register: docker_excluder_stat
- name: Enable docker excluder
command: "{{ openshift.common.service_type }}-docker-excluder exclude"
- # if the docker override is set, it means the docker excluder needs to be enabled no matter what
- # if the docker override is not set, the excluder is set based on enable_docker_excluder
when:
- - enable_docker_excluder | default(false) | bool
+ - exclude_docker_excluder | default(false) | bool
+ - docker_excluder_stat.stat.exists
+ - name: Check for openshift excluder
+ stat:
+ path: /sbin/{{ openshift.common.service_type }}-excluder
+ register: openshift_excluder_stat
- name: Enable openshift excluder
command: "{{ openshift.common.service_type }}-excluder exclude"
# if the openshift override is set, it means the openshift excluder is disabled no matter what
# if the openshift override is not set, the excluder is set based on enable_openshift_excluder
when:
- - enable_openshift_excluder | default(false) | bool
+ - exclude_openshift_excluder | default(false) | bool
+ - openshift_excluder_stat.stat.exists
+
when:
- not openshift.common.is_atomic | bool
diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml
index dcc8df0cb..3490a613e 100644
--- a/roles/openshift_excluder/tasks/install.yml
+++ b/roles/openshift_excluder/tasks/install.yml
@@ -6,14 +6,14 @@
- name: Install docker excluder
package:
- name: "{{ openshift.common.service_type }}-docker-excluder"
+ name: "{{ openshift.common.service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
state: "{{ docker_excluder_package_state }}"
when:
- install_docker_excluder | default(true) | bool
- name: Install openshift excluder
package:
- name: "{{ openshift.common.service_type }}-excluder"
+ name: "{{ openshift.common.service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
state: "{{ openshift_excluder_package_state }}"
when:
- install_openshift_excluder | default(true) | bool
diff --git a/roles/openshift_excluder/tasks/main.yml b/roles/openshift_excluder/tasks/main.yml
deleted file mode 100644
index 78a3d37cb..000000000
--- a/roles/openshift_excluder/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-include: status.yml
diff --git a/roles/openshift_excluder/tasks/status.yml b/roles/openshift_excluder/tasks/status.yml
deleted file mode 100644
index 363ccdbea..000000000
--- a/roles/openshift_excluder/tasks/status.yml
+++ /dev/null
@@ -1,84 +0,0 @@
----
-- name: Determine if excluder packages are installed
- rpm_q:
- name: "{{ openshift.common.service_type }}-excluder"
- state: present
- register: openshift_excluder_installed
- failed_when: false
-
-# docker excluder needs to be enable by default
-- name: Determine if docker packages are installed
- rpm_q:
- name: "{{ openshift.common.service_type }}-docker-excluder"
- state: present
- register: docker_excluder_installed
- failed_when: false
-
-# The excluder status function returns 0 when everything is excluded
-# and 1 if any packages are missing from the exclusions list and outputs a warning to stderr
-# # atomic-openshift-excluder status ; echo $?
-# exclude -- All packages excluded
-# 0
-# # atomic-openshift-excluder unexclude
-# # atomic-openshift-excluder status ; echo $?
-# unexclude -- At least one package not excluded
-# 1
-
-- block:
- - include: init.yml
- - block:
- - name: Record openshift excluder status
- command: "{{ openshift.common.service_type }}-excluder status"
- register: excluder_status
- failed_when: false
-
- # Even though the openshift excluder is enabled
- # if the status is non-zero, disabled the excluder
- - name: Override openshift excluder enablement if the status is non-zero
- set_fact:
- disable_openshift_excluder_override: true
- when:
- - "{{ excluder_status.rc | default(0) != 0 }}"
-
- - debug:
- msg: "Disabling openshift excluder"
- when:
- - "{{ excluder_status.rc | default(0) != 0 }}"
-
- when:
- - "{{ openshift_excluder_installed.installed_versions | default([]) | length > 0 }}"
- - "{{ openshift_excluder_on }}"
-
- - block:
- - name: Record docker excluder status
- command: "{{ openshift.common.service_type }}-docker-excluder status"
- register: docker_excluder_status
- failed_when: false
-
- # If the docker excluder is installed and the status is non-zero
- # always enable the docker excluder
- - name: Override docker excluder enablement if the status is non-zero
- set_fact:
- enable_docker_excluder_override: true
- when:
- - "{{ docker_excluder_status.rc | default(0) != 0 }}"
-
- - debug:
- msg: "Enabling docker excluder"
- when:
- - "{{ docker_excluder_status.rc | default(0) != 0 }}"
-
- # As the docker excluder status is not satisfied,
- # re-enable entire docker excluder again
- # At the same time keep the override set in a case other task would
- - name: Enable docker excluder
- command: "{{ openshift.common.service_type }}-docker-excluder exclude"
-
- # Run the docker excluder status even if the excluder is disabled.
- # In order to determine of the excluder needs to be enabled.
- when:
- - "{{ docker_excluder_installed.installed_versions | default([]) | length > 0 }}"
- - "{{ docker_excluder_on }}"
-
- when:
- - not openshift.common.is_atomic | bool
diff --git a/roles/openshift_excluder/tasks/unexclude.yml b/roles/openshift_excluder/tasks/unexclude.yml
index 196ca25f5..4df7f14b4 100644
--- a/roles/openshift_excluder/tasks/unexclude.yml
+++ b/roles/openshift_excluder/tasks/unexclude.yml
@@ -1,19 +1,28 @@
---
# input variables:
-# - disable_docker_excluder
-# - disable_openshift_excluder
+# - unexclude_docker_excluder
+# - unexclude_openshift_excluder
- block:
- - include: init.yml
+ - name: Check for docker-excluder
+ stat:
+ path: /sbin/{{ openshift.common.service_type }}-docker-excluder
+ register: docker_excluder_stat
- name: disable docker excluder
command: "{{ openshift.common.service_type }}-docker-excluder unexclude"
when:
- - disable_docker_excluder | default(false) | bool
+ - unexclude_docker_excluder | default(false) | bool
+ - docker_excluder_stat.stat.exists
+ - name: Check for openshift excluder
+ stat:
+ path: /sbin/{{ openshift.common.service_type }}-excluder
+ register: openshift_excluder_stat
- name: disable openshift excluder
command: "{{ openshift.common.service_type }}-excluder unexclude"
when:
- - disable_openshift_excluder | default(false) | bool
+ - unexclude_openshift_excluder | default(false) | bool
+ - openshift_excluder_stat.stat.exists
when:
- not openshift.common.is_atomic | bool
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 8ea900e21..7edf141e5 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -19,8 +19,8 @@ import struct
import socket
from distutils.util import strtobool
from distutils.version import LooseVersion
-from six import string_types, text_type
-from six.moves import configparser
+from ansible.module_utils.six import string_types, text_type
+from ansible.module_utils.six.moves import configparser
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
@@ -467,6 +467,24 @@ def set_flannel_facts_if_unset(facts):
return facts
+def set_calico_facts_if_unset(facts):
+ """ Set calico facts if not already present in facts dict
+ dict: the facts dict updated with the calico facts if
+ missing
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with the calico
+ facts if they were not already present
+
+ """
+ if 'common' in facts:
+ if 'use_calico' not in facts['common']:
+ use_calico = False
+ facts['common']['use_calico'] = use_calico
+ return facts
+
+
def set_nuage_facts_if_unset(facts):
""" Set nuage facts if not already present in facts dict
dict: the facts dict updated with the nuage facts if
@@ -918,7 +936,9 @@ def set_version_facts_if_unset(facts):
facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5
facts['common']['version_gte_3_6_or_1_6'] = version_gte_3_6_or_1_6
- if version_gte_3_5_or_1_5:
+ if version_gte_3_6_or_1_6:
+ examples_content_version = 'v1.6'
+ elif version_gte_3_5_or_1_5:
examples_content_version = 'v1.5'
elif version_gte_3_4_or_1_4:
examples_content_version = 'v1.4'
@@ -1953,6 +1973,7 @@ class OpenShiftFacts(object):
facts = set_url_facts_if_unset(facts)
facts = set_project_cfg_facts_if_unset(facts)
facts = set_flannel_facts_if_unset(facts)
+ facts = set_calico_facts_if_unset(facts)
facts = set_nuage_facts_if_unset(facts)
facts = set_contiv_facts_if_unset(facts)
facts = set_node_schedulability(facts)
diff --git a/roles/openshift_facts/meta/main.yml b/roles/openshift_facts/meta/main.yml
index 0be3afd24..7eead2d6e 100644
--- a/roles/openshift_facts/meta/main.yml
+++ b/roles/openshift_facts/meta/main.yml
@@ -12,4 +12,5 @@ galaxy_info:
categories:
- cloud
- system
-dependencies: []
+dependencies:
+- role: openshift_sanitize_inventory
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 73c668c72..f657d86cf 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -47,6 +47,12 @@
when: l_is_atomic | bool
+- name: Load variables
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ ansible_distribution }}.yml"
+ - "default.yml"
+
- name: Ensure various deps are installed
package: name={{ item }} state=present
with_items: "{{ required_packages }}"
@@ -64,8 +70,7 @@
role: common
local_facts:
debug_level: "{{ openshift_debug_level | default(2) }}"
- # TODO: Deprecate deployment_type in favor of openshift_deployment_type
- deployment_type: "{{ openshift_deployment_type | default(deployment_type) }}"
+ deployment_type: "{{ openshift_deployment_type }}"
deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
cluster_id: "{{ openshift_cluster_id | default('default') }}"
hostname: "{{ openshift_hostname | default(None) }}"
diff --git a/roles/openshift_facts/vars/Fedora.yml b/roles/openshift_facts/vars/Fedora.yml
new file mode 100644
index 000000000..745f5f398
--- /dev/null
+++ b/roles/openshift_facts/vars/Fedora.yml
@@ -0,0 +1,6 @@
+---
+required_packages:
+ - iproute
+ - python3-dbus
+ - PyYAML
+ - yum-utils
diff --git a/roles/openshift_facts/vars/default.yml b/roles/openshift_facts/vars/default.yml
new file mode 100644
index 000000000..3cd616d16
--- /dev/null
+++ b/roles/openshift_facts/vars/default.yml
@@ -0,0 +1,6 @@
+---
+required_packages:
+ - iproute
+ - python-dbus
+ - PyYAML
+ - yum-utils
diff --git a/roles/openshift_facts/vars/main.yml b/roles/openshift_facts/vars/main.yml
index 07f5100ad..89d4034d3 100644
--- a/roles/openshift_facts/vars/main.yml
+++ b/roles/openshift_facts/vars/main.yml
@@ -1,11 +1,4 @@
---
-required_packages:
- - iproute
- - python-dbus
- - python-six
- - PyYAML
- - yum-utils
-
required_system_containers_packages:
- atomic
- ostree
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index 0411797b1..03c40b78b 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -4,6 +4,7 @@ Ansible action plugin to execute health checks in OpenShift clusters.
# pylint: disable=wrong-import-position,missing-docstring,invalid-name
import sys
import os
+from collections import defaultdict
try:
from __main__ import display
@@ -17,7 +18,7 @@ from ansible.plugins.action import ActionBase
# this callback plugin.
sys.path.insert(1, os.path.dirname(os.path.dirname(__file__)))
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException # noqa: E402
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException, load_checks # noqa: E402
class ActionModule(ActionBase):
@@ -41,20 +42,11 @@ class ActionModule(ActionBase):
return result
args = self._task.args
- requested_checks = resolve_checks(args.get("checks", []), known_checks.values())
-
- unknown_checks = requested_checks - set(known_checks)
- if unknown_checks:
- result["failed"] = True
- result["msg"] = (
- "One or more checks are unknown: {}. "
- "Make sure there is no typo in the playbook and no files are missing."
- ).format(", ".join(unknown_checks))
- return result
+ resolved_checks = resolve_checks(args.get("checks", []), known_checks.values())
result["checks"] = check_results = {}
- for check_name in requested_checks & set(known_checks):
+ for check_name in resolved_checks:
display.banner("CHECK [{} : {}]".format(check_name, task_vars["ansible_host"]))
check = known_checks[check_name]
@@ -74,14 +66,14 @@ class ActionModule(ActionBase):
result["failed"] = True
result["msg"] = "One or more checks failed"
+ result["changed"] = any(r.get("changed", False) for r in check_results.values())
return result
def load_known_checks(self):
- known_checks = {}
+ load_checks()
- known_check_classes = set(cls for cls in OpenShiftCheck.subclasses())
-
- for cls in known_check_classes:
+ known_checks = {}
+ for cls in OpenShiftCheck.subclasses():
check_name = cls.name
if check_name in known_checks:
other_cls = known_checks[check_name].__class__
@@ -90,27 +82,46 @@ class ActionModule(ActionBase):
check_name,
cls.__module__, cls.__name__,
other_cls.__module__, other_cls.__name__))
- known_checks[check_name] = cls(module_executor=self._execute_module)
-
+ known_checks[check_name] = cls(execute_module=self._execute_module)
return known_checks
def resolve_checks(names, all_checks):
"""Returns a set of resolved check names.
- Resolving a check name involves expanding tag references (e.g., '@tag') with
- all the checks that contain the given tag.
+ Resolving a check name expands tag references (e.g., "@tag") to all the
+ checks that contain the given tag. OpenShiftCheckException is raised if
+ names contains an unknown check or tag name.
names should be a sequence of strings.
all_checks should be a sequence of check classes/instances.
"""
- resolved = set()
- for name in names:
- if name.startswith("@"):
- for check in all_checks:
- if name[1:] in check.tags:
- resolved.add(check.name)
- else:
- resolved.add(name)
+ known_check_names = set(check.name for check in all_checks)
+ known_tag_names = set(name for check in all_checks for name in check.tags)
+
+ check_names = set(name for name in names if not name.startswith('@'))
+ tag_names = set(name[1:] for name in names if name.startswith('@'))
+
+ unknown_check_names = check_names - known_check_names
+ unknown_tag_names = tag_names - known_tag_names
+
+ if unknown_check_names or unknown_tag_names:
+ msg = []
+ if unknown_check_names:
+ msg.append('Unknown check names: {}.'.format(', '.join(sorted(unknown_check_names))))
+ if unknown_tag_names:
+ msg.append('Unknown tag names: {}.'.format(', '.join(sorted(unknown_tag_names))))
+ msg.append('Make sure there is no typo in the playbook and no files are missing.')
+ raise OpenShiftCheckException('\n'.join(msg))
+
+ tag_to_checks = defaultdict(set)
+ for check in all_checks:
+ for tag in check.tags:
+ tag_to_checks[tag].add(check.name)
+
+ resolved = check_names.copy()
+ for tag in tag_names:
+ resolved.update(tag_to_checks[tag])
+
return resolved
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index 8caefab15..208e81048 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -53,11 +53,11 @@ class CallbackModule(CallbackBase):
subsequent_extra_indent = u' ' * (initial_indent_len + 10)
for i, failure in enumerate(self.__failures, 1):
- lines = _format_failure(failure)
- self._display.display(u'\n{}{}'.format(initial_indent_format.format(i), lines[0]))
- for line in lines[1:]:
- line = line.replace(u'\n', u'\n' + subsequent_extra_indent)
- indented = u'{}{}'.format(subsequent_indent, line)
+ entries = _format_failure(failure)
+ self._display.display(u'\n{}{}'.format(initial_indent_format.format(i), entries[0]))
+ for entry in entries[1:]:
+ entry = entry.replace(u'\n', u'\n' + subsequent_extra_indent)
+ indented = u'{}{}'.format(subsequent_indent, entry)
self._display.display(indented)
@@ -66,8 +66,9 @@ class CallbackModule(CallbackBase):
# Status: permanently disabled unless Ansible's API changes.
# pylint: disable=protected-access
def _format_failure(failure):
- '''Return a list of pretty-formatted lines describing a failure, including
- relevant information about it. Line separators are not included.'''
+ '''Return a list of pretty-formatted text entries describing a failure, including
+ relevant information about it. Expect that the list of text entries will be joined
+ by a newline separator when output to the user.'''
result = failure['result']
host = result._host.get_name()
play = _get_play(result._task)
@@ -75,16 +76,29 @@ def _format_failure(failure):
play = play.get_name()
task = result._task.get_name()
msg = result._result.get('msg', u'???')
- rows = (
+ fields = (
(u'Host', host),
(u'Play', play),
(u'Task', task),
(u'Message', stringc(msg, C.COLOR_ERROR)),
)
if 'checks' in result._result:
- rows += ((u'Details', stringc(pformat(result._result['checks']), C.COLOR_ERROR)),)
+ fields += ((u'Details', _format_failed_checks(result._result['checks'])),)
row_format = '{:10}{}'
- return [row_format.format(header + u':', body) for header, body in rows]
+ return [row_format.format(header + u':', body) for header, body in fields]
+
+
+def _format_failed_checks(checks):
+ '''Return pretty-formatted text describing checks that failed.'''
+ failed_check_msgs = []
+ for check, body in checks.items():
+ if body.get('failed', False): # only show the failed checks
+ msg = body.get('msg', u"Failed without returning a message")
+ failed_check_msgs.append('check "%s":\n%s' % (check, msg))
+ if failed_check_msgs:
+ return stringc("\n\n".join(failed_check_msgs), C.COLOR_ERROR)
+ else: # something failed but no checks will admit to it, so dump everything
+ return stringc(pformat(checks), C.COLOR_ERROR)
# Reason: disable pylint protected-access because we need to access _*
diff --git a/roles/openshift_health_checker/library/aos_version.py b/roles/openshift_health_checker/library/aos_version.py
index 13b7d310b..191a4b107 100755
--- a/roles/openshift_health_checker/library/aos_version.py
+++ b/roles/openshift_health_checker/library/aos_version.py
@@ -32,6 +32,7 @@ def main(): # pylint: disable=missing-docstring,too-many-branches
bail("prefix must not be empty")
yb = yum.YumBase() # pylint: disable=invalid-name
+ yb.conf.disable_excludes = ["all"] # assume the openshift excluder will be managed, ignore current state
# search for package versions available for aos pkgs
expected_pkgs = [
diff --git a/roles/openshift_health_checker/library/check_yum_update.py b/roles/openshift_health_checker/library/check_yum_update.py
index 9bc14fd47..630ebc848 100755
--- a/roles/openshift_health_checker/library/check_yum_update.py
+++ b/roles/openshift_health_checker/library/check_yum_update.py
@@ -27,6 +27,7 @@ def main(): # pylint: disable=missing-docstring,too-many-branches
module.fail_json(msg=error)
yb = yum.YumBase() # pylint: disable=invalid-name
+ yb.conf.disable_excludes = ["all"] # assume the openshift excluder will be managed, ignore current state
# determine if the existing yum configuration is valid
try:
yb.repos.populateSack(mdtype='metadata', cacheonly=1)
diff --git a/roles/openshift_health_checker/library/docker_container.py b/roles/openshift_health_checker/library/docker_container.py
new file mode 100644
index 000000000..f81b4ec01
--- /dev/null
+++ b/roles/openshift_health_checker/library/docker_container.py
@@ -0,0 +1,2036 @@
+#!/usr/bin/python
+# pylint: skip-file
+# flake8: noqa
+
+# TODO: remove this file once openshift-ansible requires ansible >= 2.3.
+# This file is a copy of
+# https://github.com/ansible/ansible/blob/20bf02f/lib/ansible/modules/cloud/docker/docker_container.py.
+# It has been temporarily vendored here due to issue https://github.com/ansible/ansible/issues/22323.
+
+
+# Copyright 2016 Red Hat | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: docker_container
+
+short_description: manage docker containers
+
+description:
+ - Manage the life cycle of docker containers.
+ - Supports check mode. Run with --check and --diff to view config difference and list of actions to be taken.
+
+version_added: "2.1"
+
+options:
+ blkio_weight:
+ description:
+ - Block IO (relative weight), between 10 and 1000.
+ default: null
+ required: false
+ capabilities:
+ description:
+ - List of capabilities to add to the container.
+ default: null
+ required: false
+ cleanup:
+ description:
+ - Use with I(detach) to remove the container after successful execution.
+ default: false
+ required: false
+ version_added: "2.2"
+ command:
+ description:
+ - Command to execute when the container starts.
+ default: null
+ required: false
+ cpu_period:
+ description:
+ - Limit CPU CFS (Completely Fair Scheduler) period
+ default: 0
+ required: false
+ cpu_quota:
+ description:
+ - Limit CPU CFS (Completely Fair Scheduler) quota
+ default: 0
+ required: false
+ cpuset_cpus:
+ description:
+ - CPUs in which to allow execution C(1,3) or C(1-3).
+ default: null
+ required: false
+ cpuset_mems:
+ description:
+ - Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1)
+ default: null
+ required: false
+ cpu_shares:
+ description:
+ - CPU shares (relative weight).
+ default: null
+ required: false
+ detach:
+ description:
+ - Enable detached mode to leave the container running in background.
+ If disabled, the task will reflect the status of the container run (failed if the command failed).
+ default: true
+ required: false
+ devices:
+ description:
+ - "List of host device bindings to add to the container. Each binding is a mapping expressed
+ in the format: <path_on_host>:<path_in_container>:<cgroup_permissions>"
+ default: null
+ required: false
+ dns_servers:
+ description:
+ - List of custom DNS servers.
+ default: null
+ required: false
+ dns_search_domains:
+ description:
+ - List of custom DNS search domains.
+ default: null
+ required: false
+ env:
+ description:
+ - Dictionary of key,value pairs.
+ default: null
+ required: false
+ env_file:
+ version_added: "2.2"
+ description:
+ - Path to a file containing environment variables I(FOO=BAR).
+ - If variable also present in C(env), then C(env) value will override.
+ - Requires docker-py >= 1.4.0.
+ default: null
+ required: false
+ entrypoint:
+ description:
+ - Command that overwrites the default ENTRYPOINT of the image.
+ default: null
+ required: false
+ etc_hosts:
+ description:
+ - Dict of host-to-IP mappings, where each host name is a key in the dictionary.
+ Each host name will be added to the container's /etc/hosts file.
+ default: null
+ required: false
+ exposed_ports:
+ description:
+ - List of additional container ports which informs Docker that the container
+ listens on the specified network ports at runtime.
+ If the port is already exposed using EXPOSE in a Dockerfile, it does not
+ need to be exposed again.
+ default: null
+ required: false
+ aliases:
+ - exposed
+ force_kill:
+ description:
+ - Use the kill command when stopping a running container.
+ default: false
+ required: false
+ groups:
+ description:
+ - List of additional group names and/or IDs that the container process will run as.
+ default: null
+ required: false
+ hostname:
+ description:
+ - Container hostname.
+ default: null
+ required: false
+ ignore_image:
+ description:
+ - When C(state) is I(present) or I(started) the module compares the configuration of an existing
+ container to requested configuration. The evaluation includes the image version. If
+ the image version in the registry does not match the container, the container will be
+ recreated. Stop this behavior by setting C(ignore_image) to I(True).
+ default: false
+ required: false
+ version_added: "2.2"
+ image:
+ description:
+ - Repository path and tag used to create the container. If an image is not found or pull is true, the image
+ will be pulled from the registry. If no tag is included, 'latest' will be used.
+ default: null
+ required: false
+ interactive:
+ description:
+ - Keep stdin open after a container is launched, even if not attached.
+ default: false
+ required: false
+ ipc_mode:
+ description:
+ - Set the IPC mode for the container. Can be one of 'container:<name|id>' to reuse another
+ container's IPC namespace or 'host' to use the host's IPC namespace within the container.
+ default: null
+ required: false
+ keep_volumes:
+ description:
+ - Retain volumes associated with a removed container.
+ default: true
+ required: false
+ kill_signal:
+ description:
+ - Override default signal used to kill a running container.
+ default null:
+ required: false
+ kernel_memory:
+ description:
+ - "Kernel memory limit (format: <number>[<unit>]). Number is a positive integer.
+ Unit can be one of b, k, m, or g. Minimum is 4M."
+ default: 0
+ required: false
+ labels:
+ description:
+ - Dictionary of key value pairs.
+ default: null
+ required: false
+ links:
+ description:
+ - List of name aliases for linked containers in the format C(container_name:alias)
+ default: null
+ required: false
+ log_driver:
+ description:
+ - Specify the logging driver. Docker uses json-file by default.
+ choices:
+ - none
+ - json-file
+ - syslog
+ - journald
+ - gelf
+ - fluentd
+ - awslogs
+ - splunk
+ default: null
+ required: false
+ log_options:
+ description:
+ - Dictionary of options specific to the chosen log_driver. See https://docs.docker.com/engine/admin/logging/overview/
+ for details.
+ required: false
+ default: null
+ mac_address:
+ description:
+ - Container MAC address (e.g. 92:d0:c6:0a:29:33)
+ default: null
+ required: false
+ memory:
+ description:
+ - "Memory limit (format: <number>[<unit>]). Number is a positive integer.
+ Unit can be one of b, k, m, or g"
+ default: 0
+ required: false
+ memory_reservation:
+ description:
+ - "Memory soft limit (format: <number>[<unit>]). Number is a positive integer.
+ Unit can be one of b, k, m, or g"
+ default: 0
+ required: false
+ memory_swap:
+ description:
+ - Total memory limit (memory + swap, format:<number>[<unit>]).
+ Number is a positive integer. Unit can be one of b, k, m, or g.
+ default: 0
+ required: false
+ memory_swappiness:
+ description:
+ - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+ default: 0
+ required: false
+ name:
+ description:
+ - Assign a name to a new container or match an existing container.
+ - When identifying an existing container name may be a name or a long or short container ID.
+ required: true
+ network_mode:
+ description:
+ - Connect the container to a network.
+ choices:
+ - bridge
+ - container:<name|id>
+ - host
+ - none
+ default: null
+ required: false
+ networks:
+ description:
+ - List of networks the container belongs to.
+ - Each network is a dict with keys C(name), C(ipv4_address), C(ipv6_address), C(links), C(aliases).
+ - For each network C(name) is required, all other keys are optional.
+ - If included, C(links) or C(aliases) are lists.
+ - For examples of the data structure and usage see EXAMPLES below.
+ - To remove a container from one or more networks, use the C(purge_networks) option.
+ default: null
+ required: false
+ version_added: "2.2"
+ oom_killer:
+ description:
+ - Whether or not to disable OOM Killer for the container.
+ default: false
+ required: false
+ oom_score_adj:
+ description:
+ - An integer value containing the score given to the container in order to tune OOM killer preferences.
+ default: 0
+ required: false
+ version_added: "2.2"
+ paused:
+ description:
+ - Use with the started state to pause running processes inside the container.
+ default: false
+ required: false
+ pid_mode:
+ description:
+ - Set the PID namespace mode for the container. Currently only supports 'host'.
+ default: null
+ required: false
+ privileged:
+ description:
+ - Give extended privileges to the container.
+ default: false
+ required: false
+ published_ports:
+ description:
+ - List of ports to publish from the container to the host.
+ - "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a
+ container port, 9000 is a host port, and 0.0.0.0 is a host interface."
+ - Container ports must be exposed either in the Dockerfile or via the C(expose) option.
+ - A value of all will publish all exposed container ports to random host ports, ignoring
+ any other mappings.
+ - If C(networks) parameter is provided, will inspect each network to see if there exists
+ a bridge network with optional parameter com.docker.network.bridge.host_binding_ipv4.
+ If such a network is found, then published ports where no host IP address is specified
+ will be bound to the host IP pointed to by com.docker.network.bridge.host_binding_ipv4.
+ Note that the first bridge network with a com.docker.network.bridge.host_binding_ipv4
+ value encountered in the list of C(networks) is the one that will be used.
+ aliases:
+ - ports
+ required: false
+ default: null
+ pull:
+ description:
+ - If true, always pull the latest version of an image. Otherwise, will only pull an image when missing.
+ default: false
+ required: false
+ purge_networks:
+ description:
+ - Remove the container from ALL networks not included in C(networks) parameter.
+ - Any default networks such as I(bridge), if not found in C(networks), will be removed as well.
+ default: false
+ required: false
+ version_added: "2.2"
+ read_only:
+ description:
+ - Mount the container's root file system as read-only.
+ default: false
+ required: false
+ recreate:
+ description:
+ - Use with present and started states to force the re-creation of an existing container.
+ default: false
+ required: false
+ restart:
+ description:
+ - Use with started state to force a matching container to be stopped and restarted.
+ default: false
+ required: false
+ restart_policy:
+ description:
+ - Container restart policy. Place quotes around I(no) option.
+ choices:
+ - always
+ - no
+ - on-failure
+ - unless-stopped
+ default: on-failure
+ required: false
+ restart_retries:
+ description:
+ - Use with restart policy to control maximum number of restart attempts.
+ default: 0
+ required: false
+ shm_size:
+ description:
+ - Size of `/dev/shm`. The format is `<number><unit>`. `number` must be greater than `0`.
+ Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes).
+ - Omitting the unit defaults to bytes. If you omit the size entirely, the system uses `64m`.
+ default: null
+ required: false
+ security_opts:
+ description:
+ - List of security options in the form of C("label:user:User")
+ default: null
+ required: false
+ state:
+ description:
+ - 'I(absent) - A container matching the specified name will be stopped and removed. Use force_kill to kill the container
+ rather than stopping it. Use keep_volumes to retain volumes associated with the removed container.'
+ - 'I(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no
+ container matches the name, a container will be created. If a container matches the name but the provided configuration
+ does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created
+ with the requested config. Image version will be taken into account when comparing configuration. To ignore image
+ version use the ignore_image option. Use the recreate option to force the re-creation of the matching container. Use
+ force_kill to kill the container rather than stopping it. Use keep_volumes to retain volumes associated with a removed
+ container.'
+ - 'I(started) - Asserts there is a running container matching the name and any provided configuration. If no container
+ matches the name, a container will be created and started. If a container matching the name is found but the
+ configuration does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed
+ and a new container will be created with the requested configuration and started. Image version will be taken into
+ account when comparing configuration. To ignore image version use the ignore_image option. Use recreate to always
+ re-create a matching container, even if it is running. Use restart to force a matching container to be stopped and
+ restarted. Use force_kill to kill a container rather than stopping it. Use keep_volumes to retain volumes associated
+ with a removed container.'
+ - 'I(stopped) - Asserts that the container is first I(present), and then if the container is running moves it to a stopped
+ state. Use force_kill to kill a container rather than stopping it.'
+ required: false
+ default: started
+ choices:
+ - absent
+ - present
+ - stopped
+ - started
+ stop_signal:
+ description:
+ - Override default signal used to stop the container.
+ default: null
+ required: false
+ stop_timeout:
+ description:
+ - Number of seconds to wait for the container to stop before sending SIGKILL.
+ required: false
+ default: null
+ trust_image_content:
+ description:
+ - If true, skip image verification.
+ default: false
+ required: false
+ tty:
+ description:
+ - Allocate a psuedo-TTY.
+ default: false
+ required: false
+ ulimits:
+ description:
+ - "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)"
+ default: null
+ required: false
+ user:
+ description:
+ - Sets the username or UID used and optionally the groupname or GID for the specified command.
+ - "Can be [ user | user:group | uid | uid:gid | user:gid | uid:group ]"
+ default: null
+ required: false
+ uts:
+ description:
+ - Set the UTS namespace mode for the container.
+ default: null
+ required: false
+ volumes:
+ description:
+ - List of volumes to mount within the container.
+ - "Use docker CLI-style syntax: C(/host:/container[:mode])"
+ - You can specify a read mode for the mount with either C(ro) or C(rw).
+ - SELinux hosts can additionally use C(z) or C(Z) to use a shared or
+ private label for the volume.
+ default: null
+ required: false
+ volume_driver:
+ description:
+ - The container volume driver.
+ default: none
+ required: false
+ volumes_from:
+ description:
+ - List of container names or Ids to get volumes from.
+ default: null
+ required: false
+extends_documentation_fragment:
+ - docker
+
+author:
+ - "Cove Schneider (@cove)"
+ - "Joshua Conner (@joshuaconner)"
+ - "Pavel Antonov (@softzilla)"
+ - "Thomas Steinbach (@ThomasSteinbach)"
+ - "Philippe Jandot (@zfil)"
+ - "Daan Oosterveld (@dusdanig)"
+ - "James Tanner (@jctanner)"
+ - "Chris Houseknecht (@chouseknecht)"
+
+requirements:
+ - "python >= 2.6"
+ - "docker-py >= 1.7.0"
+ - "Docker API >= 1.20"
+'''
+
+EXAMPLES = '''
+- name: Create a data container
+ docker_container:
+ name: mydata
+ image: busybox
+ volumes:
+ - /data
+
+- name: Re-create a redis container
+ docker_container:
+ name: myredis
+ image: redis
+ command: redis-server --appendonly yes
+ state: present
+ recreate: yes
+ exposed_ports:
+ - 6379
+ volumes_from:
+ - mydata
+
+- name: Restart a container
+ docker_container:
+ name: myapplication
+ image: someuser/appimage
+ state: started
+ restart: yes
+ links:
+ - "myredis:aliasedredis"
+ devices:
+ - "/dev/sda:/dev/xvda:rwm"
+ ports:
+ - "8080:9000"
+ - "127.0.0.1:8081:9001/udp"
+ env:
+ SECRET_KEY: ssssh
+
+- name: Container present
+ docker_container:
+ name: mycontainer
+ state: present
+ image: ubuntu:14.04
+ command: sleep infinity
+
+- name: Stop a container
+ docker_container:
+ name: mycontainer
+ state: stopped
+
+- name: Start 4 load-balanced containers
+ docker_container:
+ name: "container{{ item }}"
+ recreate: yes
+ image: someuser/anotherappimage
+ command: sleep 1d
+ with_sequence: count=4
+
+- name: remove container
+ docker_container:
+ name: ohno
+ state: absent
+
+- name: Syslogging output
+ docker_container:
+ name: myservice
+ image: busybox
+ log_driver: syslog
+ log_options:
+ syslog-address: tcp://my-syslog-server:514
+ syslog-facility: daemon
+ # NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag" for
+ # older docker installs, use "syslog-tag" instead
+ tag: myservice
+
+- name: Create db container and connect to network
+ docker_container:
+ name: db_test
+ image: "postgres:latest"
+ networks:
+ - name: "{{ docker_network_name }}"
+
+- name: Start container, connect to network and link
+ docker_container:
+ name: sleeper
+ image: ubuntu:14.04
+ networks:
+ - name: TestingNet
+ ipv4_address: "172.1.1.100"
+ aliases:
+ - sleepyzz
+ links:
+ - db_test:db
+ - name: TestingNet2
+
+- name: Start a container with a command
+ docker_container:
+ name: sleepy
+ image: ubuntu:14.04
+ command: sleep infinity
+
+- name: Add container to networks
+ docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet
+ ipv4_address: 172.1.1.18
+ links:
+ - sleeper
+ - name: TestingNet2
+ ipv4_address: 172.1.10.20
+
+- name: Update network with aliases
+ docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet
+ aliases:
+ - sleepyz
+ - zzzz
+
+- name: Remove container from one network
+ docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet2
+ purge_networks: yes
+
+- name: Remove container from all networks
+ docker_container:
+ name: sleepy
+ purge_networks: yes
+
+'''
+
+RETURN = '''
+docker_container:
+ description:
+ - Before 2.3 this was 'ansible_docker_container' but was renamed due to conflicts with the connection plugin.
+ - Facts representing the current state of the container. Matches the docker inspection output.
+ - Note that facts are not part of registered vars but accessible directly.
+ - Empty if C(state) is I(absent)
+ - If detached is I(False), will include Output attribute containing any output from container run.
+ returned: always
+ type: dict
+ sample: '{
+ "AppArmorProfile": "",
+ "Args": [],
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/usr/bin/supervisord"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "443/tcp": {},
+ "80/tcp": {}
+ },
+ "Hostname": "8e47bf643eb9",
+ "Image": "lnmp_nginx:v1",
+ "Labels": {},
+ "OnBuild": null,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/tmp/lnmp/nginx-sites/logs/": {}
+ },
+ ...
+ }'
+'''
+
+import re
+
+from ansible.module_utils.docker_common import *
+
+try:
+ from docker import utils
+ if HAS_DOCKER_PY_2:
+ from docker.types import Ulimit
+ else:
+ from docker.utils.types import Ulimit
+except:
+ # missing docker-py handled in ansible.module_utils.docker
+ pass
+
+
+REQUIRES_CONVERSION_TO_BYTES = [
+ 'memory',
+ 'memory_reservation',
+ 'memory_swap',
+ 'shm_size'
+]
+
+VOLUME_PERMISSIONS = ('rw', 'ro', 'z', 'Z')
+
+class TaskParameters(DockerBaseClass):
+ '''
+ Access and parse module parameters
+ '''
+
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+
+ self.blkio_weight = None
+ self.capabilities = None
+ self.cleanup = None
+ self.command = None
+ self.cpu_period = None
+ self.cpu_quota = None
+ self.cpuset_cpus = None
+ self.cpuset_mems = None
+ self.cpu_shares = None
+ self.detach = None
+ self.debug = None
+ self.devices = None
+ self.dns_servers = None
+ self.dns_opts = None
+ self.dns_search_domains = None
+ self.env = None
+ self.env_file = None
+ self.entrypoint = None
+ self.etc_hosts = None
+ self.exposed_ports = None
+ self.force_kill = None
+ self.groups = None
+ self.hostname = None
+ self.ignore_image = None
+ self.image = None
+ self.interactive = None
+ self.ipc_mode = None
+ self.keep_volumes = None
+ self.kernel_memory = None
+ self.kill_signal = None
+ self.labels = None
+ self.links = None
+ self.log_driver = None
+ self.log_options = None
+ self.mac_address = None
+ self.memory = None
+ self.memory_reservation = None
+ self.memory_swap = None
+ self.memory_swappiness = None
+ self.name = None
+ self.network_mode = None
+ self.networks = None
+ self.oom_killer = None
+ self.oom_score_adj = None
+ self.paused = None
+ self.pid_mode = None
+ self.privileged = None
+ self.purge_networks = None
+ self.pull = None
+ self.read_only = None
+ self.recreate = None
+ self.restart = None
+ self.restart_retries = None
+ self.restart_policy = None
+ self.shm_size = None
+ self.security_opts = None
+ self.state = None
+ self.stop_signal = None
+ self.stop_timeout = None
+ self.trust_image_content = None
+ self.tty = None
+ self.user = None
+ self.uts = None
+ self.volumes = None
+ self.volume_binds = dict()
+ self.volumes_from = None
+ self.volume_driver = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+ for param_name in REQUIRES_CONVERSION_TO_BYTES:
+ if client.module.params.get(param_name):
+ try:
+ setattr(self, param_name, human_to_bytes(client.module.params.get(param_name)))
+ except ValueError as exc:
+ self.fail("Failed to convert %s to bytes: %s" % (param_name, exc))
+
+ self.publish_all_ports = False
+ self.published_ports = self._parse_publish_ports()
+ if self.published_ports in ('all', 'ALL'):
+ self.publish_all_ports = True
+ self.published_ports = None
+
+ self.ports = self._parse_exposed_ports(self.published_ports)
+ self.log("expose ports:")
+ self.log(self.ports, pretty_print=True)
+
+ self.links = self._parse_links(self.links)
+
+ if self.volumes:
+ self.volumes = self._expand_host_paths()
+
+ self.env = self._get_environment()
+ self.ulimits = self._parse_ulimits()
+ self.log_config = self._parse_log_config()
+ self.exp_links = None
+ self.volume_binds = self._get_volume_binds(self.volumes)
+
+ self.log("volumes:")
+ self.log(self.volumes, pretty_print=True)
+ self.log("volume binds:")
+ self.log(self.volume_binds, pretty_print=True)
+
+ if self.networks:
+ for network in self.networks:
+ if not network.get('name'):
+ self.fail("Parameter error: network must have a name attribute.")
+ network['id'] = self._get_network_id(network['name'])
+ if not network['id']:
+ self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name'])
+ if network.get('links'):
+ network['links'] = self._parse_links(network['links'])
+
+ def fail(self, msg):
+ self.client.module.fail_json(msg=msg)
+
+ @property
+ def update_parameters(self):
+ '''
+ Returns parameters used to update a container
+ '''
+
+ update_parameters = dict(
+ blkio_weight='blkio_weight',
+ cpu_period='cpu_period',
+ cpu_quota='cpu_quota',
+ cpu_shares='cpu_shares',
+ cpuset_cpus='cpuset_cpus',
+ mem_limit='memory',
+ mem_reservation='mem_reservation',
+ memswap_limit='memory_swap',
+ kernel_memory='kernel_memory'
+ )
+ result = dict()
+ for key, value in update_parameters.items():
+ if getattr(self, value, None) is not None:
+ result[key] = getattr(self, value)
+ return result
+
+ @property
+ def create_parameters(self):
+ '''
+ Returns parameters used to create a container
+ '''
+ create_params = dict(
+ command='command',
+ hostname='hostname',
+ user='user',
+ detach='detach',
+ stdin_open='interactive',
+ tty='tty',
+ ports='ports',
+ environment='env',
+ name='name',
+ entrypoint='entrypoint',
+ cpu_shares='cpu_shares',
+ mac_address='mac_address',
+ labels='labels',
+ stop_signal='stop_signal',
+ volume_driver='volume_driver',
+ )
+
+ result = dict(
+ host_config=self._host_config(),
+ volumes=self._get_mounts(),
+ )
+
+ for key, value in create_params.items():
+ if getattr(self, value, None) is not None:
+ result[key] = getattr(self, value)
+ return result
+
+ def _expand_host_paths(self):
+ new_vols = []
+ for vol in self.volumes:
+ if ':' in vol:
+ if len(vol.split(':')) == 3:
+ host, container, mode = vol.split(':')
+ if re.match(r'[\.~]', host):
+ host = os.path.abspath(host)
+ new_vols.append("%s:%s:%s" % (host, container, mode))
+ continue
+ elif len(vol.split(':')) == 2:
+ parts = vol.split(':')
+ if parts[1] not in VOLUME_PERMISSIONS and re.match(r'[\.~]', parts[0]):
+ host = os.path.abspath(parts[0])
+ new_vols.append("%s:%s:rw" % (host, parts[1]))
+ continue
+ new_vols.append(vol)
+ return new_vols
+
+ def _get_mounts(self):
+ '''
+ Return a list of container mounts.
+ :return:
+ '''
+ result = []
+ if self.volumes:
+ for vol in self.volumes:
+ if ':' in vol:
+ if len(vol.split(':')) == 3:
+ host, container, _ = vol.split(':')
+ result.append(container)
+ continue
+ if len(vol.split(':')) == 2:
+ parts = vol.split(':')
+ if parts[1] not in VOLUME_PERMISSIONS:
+ result.append(parts[1])
+ continue
+ result.append(vol)
+ self.log("mounts:")
+ self.log(result, pretty_print=True)
+ return result
+
+ def _host_config(self):
+ '''
+ Returns parameters used to create a HostConfig object
+ '''
+
+ host_config_params=dict(
+ port_bindings='published_ports',
+ publish_all_ports='publish_all_ports',
+ links='links',
+ privileged='privileged',
+ dns='dns_servers',
+ dns_search='dns_search_domains',
+ binds='volume_binds',
+ volumes_from='volumes_from',
+ network_mode='network_mode',
+ cap_add='capabilities',
+ extra_hosts='etc_hosts',
+ read_only='read_only',
+ ipc_mode='ipc_mode',
+ security_opt='security_opts',
+ ulimits='ulimits',
+ log_config='log_config',
+ mem_limit='memory',
+ memswap_limit='memory_swap',
+ mem_swappiness='memory_swappiness',
+ oom_score_adj='oom_score_adj',
+ shm_size='shm_size',
+ group_add='groups',
+ devices='devices',
+ pid_mode='pid_mode'
+ )
+ params = dict()
+ for key, value in host_config_params.items():
+ if getattr(self, value, None) is not None:
+ params[key] = getattr(self, value)
+
+ if self.restart_policy:
+ params['restart_policy'] = dict(Name=self.restart_policy,
+ MaximumRetryCount=self.restart_retries)
+
+ return self.client.create_host_config(**params)
+
+ @property
+ def default_host_ip(self):
+ ip = '0.0.0.0'
+ if not self.networks:
+ return ip
+ for net in self.networks:
+ if net.get('name'):
+ network = self.client.inspect_network(net['name'])
+ if network.get('Driver') == 'bridge' and \
+ network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'):
+ ip = network['Options']['com.docker.network.bridge.host_binding_ipv4']
+ break
+ return ip
+
+ def _parse_publish_ports(self):
+ '''
+ Parse ports from docker CLI syntax
+ '''
+ if self.published_ports is None:
+ return None
+
+ if 'all' in self.published_ports:
+ return 'all'
+
+ default_ip = self.default_host_ip
+
+ binds = {}
+ for port in self.published_ports:
+ parts = str(port).split(':')
+ container_port = parts[-1]
+ if '/' not in container_port:
+ container_port = int(parts[-1])
+
+ p_len = len(parts)
+ if p_len == 1:
+ bind = (default_ip,)
+ elif p_len == 2:
+ bind = (default_ip, int(parts[0]))
+ elif p_len == 3:
+ bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],)
+
+ if container_port in binds:
+ old_bind = binds[container_port]
+ if isinstance(old_bind, list):
+ old_bind.append(bind)
+ else:
+ binds[container_port] = [binds[container_port], bind]
+ else:
+ binds[container_port] = bind
+ return binds
+
+ @staticmethod
+ def _get_volume_binds(volumes):
+ '''
+ Extract host bindings, if any, from list of volume mapping strings.
+
+ :return: dictionary of bind mappings
+ '''
+ result = dict()
+ if volumes:
+ for vol in volumes:
+ host = None
+ if ':' in vol:
+ if len(vol.split(':')) == 3:
+ host, container, mode = vol.split(':')
+ if len(vol.split(':')) == 2:
+ parts = vol.split(':')
+ if parts[1] not in VOLUME_PERMISSIONS:
+ host, container, mode = (vol.split(':') + ['rw'])
+ if host is not None:
+ result[host] = dict(
+ bind=container,
+ mode=mode
+ )
+ return result
+
+ def _parse_exposed_ports(self, published_ports):
+ '''
+ Parse exposed ports from docker CLI-style ports syntax.
+ '''
+ exposed = []
+ if self.exposed_ports:
+ for port in self.exposed_ports:
+ port = str(port).strip()
+ protocol = 'tcp'
+ match = re.search(r'(/.+$)', port)
+ if match:
+ protocol = match.group(1).replace('/', '')
+ port = re.sub(r'/.+$', '', port)
+ exposed.append((port, protocol))
+ if published_ports:
+ # Any published port should also be exposed
+ for publish_port in published_ports:
+ match = False
+ if isinstance(publish_port, basestring) and '/' in publish_port:
+ port, protocol = publish_port.split('/')
+ port = int(port)
+ else:
+ protocol = 'tcp'
+ port = int(publish_port)
+ for exposed_port in exposed:
+ if isinstance(exposed_port[0], basestring) and '-' in exposed_port[0]:
+ start_port, end_port = exposed_port[0].split('-')
+ if int(start_port) <= port <= int(end_port):
+ match = True
+ elif exposed_port[0] == port:
+ match = True
+ if not match:
+ exposed.append((port, protocol))
+ return exposed
+
+ @staticmethod
+ def _parse_links(links):
+ '''
+ Turn links into a dictionary
+ '''
+ if links is None:
+ return None
+
+ result = {}
+ for link in links:
+ parsed_link = link.split(':', 1)
+ if len(parsed_link) == 2:
+ result[parsed_link[0]] = parsed_link[1]
+ else:
+ result[parsed_link[0]] = parsed_link[0]
+ return result
+
+ def _parse_ulimits(self):
+ '''
+ Turn ulimits into an array of Ulimit objects
+ '''
+ if self.ulimits is None:
+ return None
+
+ results = []
+ for limit in self.ulimits:
+ limits = dict()
+ pieces = limit.split(':')
+ if len(pieces) >= 2:
+ limits['name'] = pieces[0]
+ limits['soft'] = int(pieces[1])
+ limits['hard'] = int(pieces[1])
+ if len(pieces) == 3:
+ limits['hard'] = int(pieces[2])
+ try:
+ results.append(Ulimit(**limits))
+ except ValueError as exc:
+ self.fail("Error parsing ulimits value %s - %s" % (limit, exc))
+ return results
+
+ def _parse_log_config(self):
+ '''
+ Create a LogConfig object
+ '''
+ if self.log_driver is None:
+ return None
+
+ options = dict(
+ Type=self.log_driver,
+ Config = dict()
+ )
+
+ if self.log_options is not None:
+ options['Config'] = self.log_options
+
+ try:
+ return LogConfig(**options)
+ except ValueError as exc:
+ self.fail('Error parsing logging options - %s' % (exc))
+
+ def _get_environment(self):
+ """
+ If environment file is combined with explicit environment variables, the explicit environment variables
+ take precedence.
+ """
+ final_env = {}
+ if self.env_file:
+ parsed_env_file = utils.parse_env_file(self.env_file)
+ for name, value in parsed_env_file.items():
+ final_env[name] = str(value)
+ if self.env:
+ for name, value in self.env.items():
+ final_env[name] = str(value)
+ return final_env
+
+ def _get_network_id(self, network_name):
+ network_id = None
+ try:
+ for network in self.client.networks(names=[network_name]):
+ if network['Name'] == network_name:
+ network_id = network['Id']
+ break
+ except Exception as exc:
+ self.fail("Error getting network id for %s - %s" % (network_name, str(exc)))
+ return network_id
+
+
+
+class Container(DockerBaseClass):
+
+ def __init__(self, container, parameters):
+ super(Container, self).__init__()
+ self.raw = container
+ self.Id = None
+ self.container = container
+ if container:
+ self.Id = container['Id']
+ self.Image = container['Image']
+ self.log(self.container, pretty_print=True)
+ self.parameters = parameters
+ self.parameters.expected_links = None
+ self.parameters.expected_ports = None
+ self.parameters.expected_exposed = None
+ self.parameters.expected_volumes = None
+ self.parameters.expected_ulimits = None
+ self.parameters.expected_etc_hosts = None
+ self.parameters.expected_env = None
+
+ def fail(self, msg):
+ self.parameters.client.module.fail_json(msg=msg)
+
+ @property
+ def exists(self):
+ return True if self.container else False
+
+ @property
+ def running(self):
+ if self.container and self.container.get('State'):
+ if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False):
+ return True
+ return False
+
+ def has_different_configuration(self, image):
+ '''
+ Diff parameters vs existing container config. Returns tuple: (True | False, List of differences)
+ '''
+ self.log('Starting has_different_configuration')
+ self.parameters.expected_entrypoint = self._get_expected_entrypoint()
+ self.parameters.expected_links = self._get_expected_links()
+ self.parameters.expected_ports = self._get_expected_ports()
+ self.parameters.expected_exposed = self._get_expected_exposed(image)
+ self.parameters.expected_volumes = self._get_expected_volumes(image)
+ self.parameters.expected_binds = self._get_expected_binds(image)
+ self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits)
+ self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts')
+ self.parameters.expected_env = self._get_expected_env(image)
+ self.parameters.expected_cmd = self._get_expected_cmd()
+ self.parameters.expected_devices = self._get_expected_devices()
+
+ if not self.container.get('HostConfig'):
+ self.fail("has_config_diff: Error parsing container properties. HostConfig missing.")
+ if not self.container.get('Config'):
+ self.fail("has_config_diff: Error parsing container properties. Config missing.")
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_config_diff: Error parsing container properties. NetworkSettings missing.")
+
+ host_config = self.container['HostConfig']
+ log_config = host_config.get('LogConfig', dict())
+ restart_policy = host_config.get('RestartPolicy', dict())
+ config = self.container['Config']
+ network = self.container['NetworkSettings']
+
+ # The previous version of the docker module ignored the detach state by
+ # assuming if the container was running, it must have been detached.
+ detach = not (config.get('AttachStderr') and config.get('AttachStdout'))
+
+ # "ExposedPorts": null returns None type & causes AttributeError - PR #5517
+ if config.get('ExposedPorts') is not None:
+ expected_exposed = [re.sub(r'/.+$', '', p) for p in config.get('ExposedPorts', dict()).keys()]
+ else:
+ expected_exposed = []
+
+ # Map parameters to container inspect results
+ config_mapping = dict(
+ image=config.get('Image'),
+ expected_cmd=config.get('Cmd'),
+ hostname=config.get('Hostname'),
+ user=config.get('User'),
+ detach=detach,
+ interactive=config.get('OpenStdin'),
+ capabilities=host_config.get('CapAdd'),
+ expected_devices=host_config.get('Devices'),
+ dns_servers=host_config.get('Dns'),
+ dns_opts=host_config.get('DnsOptions'),
+ dns_search_domains=host_config.get('DnsSearch'),
+ expected_env=(config.get('Env') or []),
+ expected_entrypoint=config.get('Entrypoint'),
+ expected_etc_hosts=host_config['ExtraHosts'],
+ expected_exposed=expected_exposed,
+ groups=host_config.get('GroupAdd'),
+ ipc_mode=host_config.get("IpcMode"),
+ labels=config.get('Labels'),
+ expected_links=host_config.get('Links'),
+ log_driver=log_config.get('Type'),
+ log_options=log_config.get('Config'),
+ mac_address=network.get('MacAddress'),
+ memory_swappiness=host_config.get('MemorySwappiness'),
+ network_mode=host_config.get('NetworkMode'),
+ oom_killer=host_config.get('OomKillDisable'),
+ oom_score_adj=host_config.get('OomScoreAdj'),
+ pid_mode=host_config.get('PidMode'),
+ privileged=host_config.get('Privileged'),
+ expected_ports=host_config.get('PortBindings'),
+ read_only=host_config.get('ReadonlyRootfs'),
+ restart_policy=restart_policy.get('Name'),
+ restart_retries=restart_policy.get('MaximumRetryCount'),
+ # Cannot test shm_size, as shm_size is not included in container inspection results.
+ # shm_size=host_config.get('ShmSize'),
+ security_opts=host_config.get("SecuriytOpt"),
+ stop_signal=config.get("StopSignal"),
+ tty=config.get('Tty'),
+ expected_ulimits=host_config.get('Ulimits'),
+ uts=host_config.get('UTSMode'),
+ expected_volumes=config.get('Volumes'),
+ expected_binds=host_config.get('Binds'),
+ volumes_from=host_config.get('VolumesFrom'),
+ volume_driver=host_config.get('VolumeDriver')
+ )
+
+ differences = []
+ for key, value in config_mapping.items():
+ self.log('check differences %s %s vs %s' % (key, getattr(self.parameters, key), str(value)))
+ if getattr(self.parameters, key, None) is not None:
+ if isinstance(getattr(self.parameters, key), list) and isinstance(value, list):
+ if len(getattr(self.parameters, key)) > 0 and isinstance(getattr(self.parameters, key)[0], dict):
+ # compare list of dictionaries
+ self.log("comparing list of dict: %s" % key)
+ match = self._compare_dictionary_lists(getattr(self.parameters, key), value)
+ else:
+ # compare two lists. Is list_a in list_b?
+ self.log("comparing lists: %s" % key)
+ set_a = set(getattr(self.parameters, key))
+ set_b = set(value)
+ match = (set_a <= set_b)
+ elif isinstance(getattr(self.parameters, key), dict) and isinstance(value, dict):
+ # compare two dicts
+ self.log("comparing two dicts: %s" % key)
+ match = self._compare_dicts(getattr(self.parameters, key), value)
+ else:
+ # primitive compare
+ self.log("primitive compare: %s" % key)
+ match = (getattr(self.parameters, key) == value)
+
+ if not match:
+ # no match. record the differences
+ item = dict()
+ item[key] = dict(
+ parameter=getattr(self.parameters, key),
+ container=value
+ )
+ differences.append(item)
+
+ has_differences = True if len(differences) > 0 else False
+ return has_differences, differences
+
+ def _compare_dictionary_lists(self, list_a, list_b):
+ '''
+ If all of list_a exists in list_b, return True
+ '''
+ if not isinstance(list_a, list) or not isinstance(list_b, list):
+ return False
+ matches = 0
+ for dict_a in list_a:
+ for dict_b in list_b:
+ if self._compare_dicts(dict_a, dict_b):
+ matches += 1
+ break
+ result = (matches == len(list_a))
+ return result
+
+ def _compare_dicts(self, dict_a, dict_b):
+ '''
+ If dict_a in dict_b, return True
+ '''
+ if not isinstance(dict_a, dict) or not isinstance(dict_b, dict):
+ return False
+ for key, value in dict_a.items():
+ if isinstance(value, dict):
+ match = self._compare_dicts(value, dict_b.get(key))
+ elif isinstance(value, list):
+ if len(value) > 0 and isinstance(value[0], dict):
+ match = self._compare_dictionary_lists(value, dict_b.get(key))
+ else:
+ set_a = set(value)
+ set_b = set(dict_b.get(key))
+ match = (set_a == set_b)
+ else:
+ match = (value == dict_b.get(key))
+ if not match:
+ return False
+ return True
+
+ def has_different_resource_limits(self):
+ '''
+ Diff parameters and container resource limits
+ '''
+ if not self.container.get('HostConfig'):
+ self.fail("limits_differ_from_container: Error parsing container properties. HostConfig missing.")
+
+ host_config = self.container['HostConfig']
+
+ config_mapping = dict(
+ cpu_period=host_config.get('CpuPeriod'),
+ cpu_quota=host_config.get('CpuQuota'),
+ cpuset_cpus=host_config.get('CpusetCpus'),
+ cpuset_mems=host_config.get('CpusetMems'),
+ cpu_shares=host_config.get('CpuShares'),
+ kernel_memory=host_config.get("KernelMemory"),
+ memory=host_config.get('Memory'),
+ memory_reservation=host_config.get('MemoryReservation'),
+ memory_swap=host_config.get('MemorySwap'),
+ oom_score_adj=host_config.get('OomScoreAdj'),
+ )
+
+ differences = []
+ for key, value in config_mapping.items():
+ if getattr(self.parameters, key, None) and getattr(self.parameters, key) != value:
+ # no match. record the differences
+ item = dict()
+ item[key] = dict(
+ parameter=getattr(self.parameters, key),
+ container=value
+ )
+ differences.append(item)
+ different = (len(differences) > 0)
+ return different, differences
+
+ def has_network_differences(self):
+ '''
+ Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6
+ '''
+ different = False
+ differences = []
+
+ if not self.parameters.networks:
+ return different, differences
+
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.")
+
+ connected_networks = self.container['NetworkSettings']['Networks']
+ for network in self.parameters.networks:
+ if connected_networks.get(network['name'], None) is None:
+ different = True
+ differences.append(dict(
+ parameter=network,
+ container=None
+ ))
+ else:
+ diff = False
+ if network.get('ipv4_address') and network['ipv4_address'] != connected_networks[network['name']].get('IPAddress'):
+ diff = True
+ if network.get('ipv6_address') and network['ipv6_address'] != connected_networks[network['name']].get('GlobalIPv6Address'):
+ diff = True
+ if network.get('aliases') and not connected_networks[network['name']].get('Aliases'):
+ diff = True
+ if network.get('aliases') and connected_networks[network['name']].get('Aliases'):
+ for alias in network.get('aliases'):
+ if alias not in connected_networks[network['name']].get('Aliases', []):
+ diff = True
+ if network.get('links') and not connected_networks[network['name']].get('Links'):
+ diff = True
+ if network.get('links') and connected_networks[network['name']].get('Links'):
+ expected_links = []
+ for link, alias in network['links'].items():
+ expected_links.append("%s:%s" % (link, alias))
+ for link in expected_links:
+ if link not in connected_networks[network['name']].get('Links', []):
+ diff = True
+ if diff:
+ different = True
+ differences.append(dict(
+ parameter=network,
+ container=dict(
+ name=network['name'],
+ ipv4_address=connected_networks[network['name']].get('IPAddress'),
+ ipv6_address=connected_networks[network['name']].get('GlobalIPv6Address'),
+ aliases=connected_networks[network['name']].get('Aliases'),
+ links=connected_networks[network['name']].get('Links')
+ )
+ ))
+ return different, differences
+
+ def has_extra_networks(self):
+ '''
+ Check if the container is connected to non-requested networks
+ '''
+ extra_networks = []
+ extra = False
+
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.")
+
+ connected_networks = self.container['NetworkSettings'].get('Networks')
+ if connected_networks:
+ for network, network_config in connected_networks.items():
+ keep = False
+ if self.parameters.networks:
+ for expected_network in self.parameters.networks:
+ if expected_network['name'] == network:
+ keep = True
+ if not keep:
+ extra = True
+ extra_networks.append(dict(name=network, id=network_config['NetworkID']))
+ return extra, extra_networks
+
+ def _get_expected_devices(self):
+ if not self.parameters.devices:
+ return None
+ expected_devices = []
+ for device in self.parameters.devices:
+ parts = device.split(':')
+ if len(parts) == 1:
+ expected_devices.append(
+ dict(
+ CgroupPermissions='rwm',
+ PathInContainer=parts[0],
+ PathOnHost=parts[0]
+ ))
+ elif len(parts) == 2:
+ parts = device.split(':')
+ expected_devices.append(
+ dict(
+ CgroupPermissions='rwm',
+ PathInContainer=parts[1],
+ PathOnHost=parts[0]
+ )
+ )
+ else:
+ expected_devices.append(
+ dict(
+ CgroupPermissions=parts[2],
+ PathInContainer=parts[1],
+ PathOnHost=parts[0]
+ ))
+ return expected_devices
+
+ def _get_expected_entrypoint(self):
+ self.log('_get_expected_entrypoint')
+ if not self.parameters.entrypoint:
+ return None
+ return shlex.split(self.parameters.entrypoint)
+
+ def _get_expected_ports(self):
+ if not self.parameters.published_ports:
+ return None
+ expected_bound_ports = {}
+ for container_port, config in self.parameters.published_ports.items():
+ if isinstance(container_port, int):
+ container_port = "%s/tcp" % container_port
+ if len(config) == 1:
+ expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': ""}]
+ elif isinstance(config[0], tuple):
+ expected_bound_ports[container_port] = []
+ for host_ip, host_port in config:
+ expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': str(host_port)})
+ else:
+ expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}]
+ return expected_bound_ports
+
+ def _get_expected_links(self):
+ if self.parameters.links is None:
+ return None
+ self.log('parameter links:')
+ self.log(self.parameters.links, pretty_print=True)
+ exp_links = []
+ for link, alias in self.parameters.links.items():
+ exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias))
+ return exp_links
+
+ def _get_expected_binds(self, image):
+ self.log('_get_expected_binds')
+ image_vols = []
+ if image:
+ image_vols = self._get_image_binds(image['ContainerConfig'].get('Volumes'))
+ param_vols = []
+ if self.parameters.volumes:
+ for vol in self.parameters.volumes:
+ host = None
+ if ':' in vol:
+ if len(vol.split(':')) == 3:
+ host, container, mode = vol.split(':')
+ if len(vol.split(':')) == 2:
+ parts = vol.split(':')
+ if parts[1] not in VOLUME_PERMISSIONS:
+ host, container, mode = vol.split(':') + ['rw']
+ if host:
+ param_vols.append("%s:%s:%s" % (host, container, mode))
+ result = list(set(image_vols + param_vols))
+ self.log("expected_binds:")
+ self.log(result, pretty_print=True)
+ return result
+
+ def _get_image_binds(self, volumes):
+ '''
+ Convert array of binds to array of strings with format host_path:container_path:mode
+
+ :param volumes: array of bind dicts
+ :return: array of strings
+ '''
+ results = []
+ if isinstance(volumes, dict):
+ results += self._get_bind_from_dict(volumes)
+ elif isinstance(volumes, list):
+ for vol in volumes:
+ results += self._get_bind_from_dict(vol)
+ return results
+
+ @staticmethod
+ def _get_bind_from_dict(volume_dict):
+ results = []
+ if volume_dict:
+ for host_path, config in volume_dict.items():
+ if isinstance(config, dict) and config.get('bind'):
+ container_path = config.get('bind')
+ mode = config.get('mode', 'rw')
+ results.append("%s:%s:%s" % (host_path, container_path, mode))
+ return results
+
+ def _get_expected_volumes(self, image):
+ self.log('_get_expected_volumes')
+ expected_vols = dict()
+ if image and image['ContainerConfig'].get('Volumes'):
+ expected_vols.update(image['ContainerConfig'].get('Volumes'))
+
+ if self.parameters.volumes:
+ for vol in self.parameters.volumes:
+ container = None
+ if ':' in vol:
+ if len(vol.split(':')) == 3:
+ host, container, mode = vol.split(':')
+ if len(vol.split(':')) == 2:
+ parts = vol.split(':')
+ if parts[1] not in VOLUME_PERMISSIONS:
+ host, container, mode = vol.split(':') + ['rw']
+ new_vol = dict()
+ if container:
+ new_vol[container] = dict()
+ else:
+ new_vol[vol] = dict()
+ expected_vols.update(new_vol)
+
+ if not expected_vols:
+ expected_vols = None
+ self.log("expected_volumes:")
+ self.log(expected_vols, pretty_print=True)
+ return expected_vols
+
+ def _get_expected_env(self, image):
+ self.log('_get_expected_env')
+ expected_env = dict()
+ if image and image['ContainerConfig'].get('Env'):
+ for env_var in image['ContainerConfig']['Env']:
+ parts = env_var.split('=', 1)
+ expected_env[parts[0]] = parts[1]
+ if self.parameters.env:
+ expected_env.update(self.parameters.env)
+ param_env = []
+ for key, value in expected_env.items():
+ param_env.append("%s=%s" % (key, value))
+ return param_env
+
+ def _get_expected_exposed(self, image):
+ self.log('_get_expected_exposed')
+ image_ports = []
+ if image:
+ image_ports = [re.sub(r'/.+$', '', p) for p in (image['ContainerConfig'].get('ExposedPorts') or {}).keys()]
+ param_ports = []
+ if self.parameters.ports:
+ param_ports = [str(p[0]) for p in self.parameters.ports]
+ result = list(set(image_ports + param_ports))
+ self.log(result, pretty_print=True)
+ return result
+
+ def _get_expected_ulimits(self, config_ulimits):
+ self.log('_get_expected_ulimits')
+ if config_ulimits is None:
+ return None
+ results = []
+ for limit in config_ulimits:
+ results.append(dict(
+ Name=limit.name,
+ Soft=limit.soft,
+ Hard=limit.hard
+ ))
+ return results
+
+ def _get_expected_cmd(self):
+ self.log('_get_expected_cmd')
+ if not self.parameters.command:
+ return None
+ return shlex.split(self.parameters.command)
+
+ def _convert_simple_dict_to_list(self, param_name, join_with=':'):
+ if getattr(self.parameters, param_name, None) is None:
+ return None
+ results = []
+ for key, value in getattr(self.parameters, param_name).items():
+ results.append("%s%s%s" % (key, join_with, value))
+ return results
+
+
+class ContainerManager(DockerBaseClass):
+ '''
+ Perform container management tasks
+ '''
+
+ def __init__(self, client):
+
+ super(ContainerManager, self).__init__()
+
+ self.client = client
+ self.parameters = TaskParameters(client)
+ self.check_mode = self.client.check_mode
+ self.results = {'changed': False, 'actions': []}
+ self.diff = {}
+ self.facts = {}
+
+ state = self.parameters.state
+ if state in ('stopped', 'started', 'present'):
+ self.present(state)
+ elif state == 'absent':
+ self.absent()
+
+ if not self.check_mode and not self.parameters.debug:
+ self.results.pop('actions')
+
+ if self.client.module._diff or self.parameters.debug:
+ self.results['diff'] = self.diff
+
+ if self.facts:
+ self.results['ansible_facts'] = {'docker_container': self.facts}
+
+ def present(self, state):
+ container = self._get_container(self.parameters.name)
+ image = self._get_image()
+
+ if not container.exists:
+ # New container
+ self.log('No container found')
+ new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
+ if new_container:
+ container = new_container
+ else:
+ # Existing container
+ different, differences = container.has_different_configuration(image)
+ image_different = False
+ if not self.parameters.ignore_image:
+ image_different = self._image_is_different(image, container)
+ if image_different or different or self.parameters.recreate:
+ self.diff['differences'] = differences
+ if image_different:
+ self.diff['image_different'] = True
+ self.log("differences")
+ self.log(differences, pretty_print=True)
+ if container.running:
+ self.container_stop(container.Id)
+ self.container_remove(container.Id)
+ new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
+ if new_container:
+ container = new_container
+
+ if container and container.exists:
+ container = self.update_limits(container)
+ container = self.update_networks(container)
+
+ if state == 'started' and not container.running:
+ container = self.container_start(container.Id)
+ elif state == 'started' and self.parameters.restart:
+ self.container_stop(container.Id)
+ container = self.container_start(container.Id)
+ elif state == 'stopped' and container.running:
+ self.container_stop(container.Id)
+ container = self._get_container(container.Id)
+
+ self.facts = container.raw
+
+ def absent(self):
+ container = self._get_container(self.parameters.name)
+ if container.exists:
+ if container.running:
+ self.container_stop(container.Id)
+ self.container_remove(container.Id)
+
+ def fail(self, msg, **kwargs):
+ self.client.module.fail_json(msg=msg, **kwargs)
+
+ def _get_container(self, container):
+ '''
+ Expects container ID or Name. Returns a container object
+ '''
+ return Container(self.client.get_container(container), self.parameters)
+
+ def _get_image(self):
+ if not self.parameters.image:
+ self.log('No image specified')
+ return None
+ repository, tag = utils.parse_repository_tag(self.parameters.image)
+ if not tag:
+ tag = "latest"
+ image = self.client.find_image(repository, tag)
+ if not self.check_mode:
+ if not image or self.parameters.pull:
+ self.log("Pull the image.")
+ image, alreadyToLatest = self.client.pull_image(repository, tag)
+ if alreadyToLatest:
+ self.results['changed'] = False
+ else:
+ self.results['changed'] = True
+ self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
+ self.log("image")
+ self.log(image, pretty_print=True)
+ return image
+
+ def _image_is_different(self, image, container):
+ if image and image.get('Id'):
+ if container and container.Image:
+ if image.get('Id') != container.Image:
+ return True
+ return False
+
+ def update_limits(self, container):
+ limits_differ, different_limits = container.has_different_resource_limits()
+ if limits_differ:
+ self.log("limit differences:")
+ self.log(different_limits, pretty_print=True)
+ if limits_differ and not self.check_mode:
+ self.container_update(container.Id, self.parameters.update_parameters)
+ return self._get_container(container.Id)
+ return container
+
+ def update_networks(self, container):
+ has_network_differences, network_differences = container.has_network_differences()
+ updated_container = container
+ if has_network_differences:
+ if self.diff.get('differences'):
+ self.diff['differences'].append(dict(network_differences=network_differences))
+ else:
+ self.diff['differences'] = [dict(network_differences=network_differences)]
+ self.results['changed'] = True
+ updated_container = self._add_networks(container, network_differences)
+
+ if self.parameters.purge_networks:
+ has_extra_networks, extra_networks = container.has_extra_networks()
+ if has_extra_networks:
+ if self.diff.get('differences'):
+ self.diff['differences'].append(dict(purge_networks=extra_networks))
+ else:
+ self.diff['differences'] = [dict(purge_networks=extra_networks)]
+ self.results['changed'] = True
+ updated_container = self._purge_networks(container, extra_networks)
+ return updated_container
+
+ def _add_networks(self, container, differences):
+ for diff in differences:
+ # remove the container from the network, if connected
+ if diff.get('container'):
+ self.results['actions'].append(dict(removed_from_network=diff['parameter']['name']))
+ if not self.check_mode:
+ try:
+ self.client.disconnect_container_from_network(container.Id, diff['parameter']['id'])
+ except Exception as exc:
+ self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'],
+ str(exc)))
+ # connect to the network
+ params = dict(
+ ipv4_address=diff['parameter'].get('ipv4_address', None),
+ ipv6_address=diff['parameter'].get('ipv6_address', None),
+ links=diff['parameter'].get('links', None),
+ aliases=diff['parameter'].get('aliases', None)
+ )
+ self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params))
+ if not self.check_mode:
+ try:
+ self.log("Connecting container to network %s" % diff['parameter']['id'])
+ self.log(params, pretty_print=True)
+ self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params)
+ except Exception as exc:
+ self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], str(exc)))
+ return self._get_container(container.Id)
+
+ def _purge_networks(self, container, networks):
+ for network in networks:
+ self.results['actions'].append(dict(removed_from_network=network['name']))
+ if not self.check_mode:
+ try:
+ self.client.disconnect_container_from_network(container.Id, network['name'])
+ except Exception as exc:
+ self.fail("Error disconnecting container from network %s - %s" % (network['name'],
+ str(exc)))
+ return self._get_container(container.Id)
+
+ def container_create(self, image, create_parameters):
+ self.log("create container")
+ self.log("image: %s parameters:" % image)
+ self.log(create_parameters, pretty_print=True)
+ self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters))
+ self.results['changed'] = True
+ new_container = None
+ if not self.check_mode:
+ try:
+ new_container = self.client.create_container(image, **create_parameters)
+ except Exception as exc:
+ self.fail("Error creating container: %s" % str(exc))
+ return self._get_container(new_container['Id'])
+ return new_container
+
+ def container_start(self, container_id):
+ self.log("start container %s" % (container_id))
+ self.results['actions'].append(dict(started=container_id))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ self.client.start(container=container_id)
+ except Exception as exc:
+ self.fail("Error starting container %s: %s" % (container_id, str(exc)))
+
+ if not self.parameters.detach:
+ status = self.client.wait(container_id)
+ output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False)
+ if status != 0:
+ self.fail(output, status=status)
+ if self.parameters.cleanup:
+ self.container_remove(container_id, force=True)
+ insp = self._get_container(container_id)
+ if insp.raw:
+ insp.raw['Output'] = output
+ else:
+ insp.raw = dict(Output=output)
+ return insp
+ return self._get_container(container_id)
+
+ def container_remove(self, container_id, link=False, force=False):
+ volume_state = (not self.parameters.keep_volumes)
+ self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force))
+ self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ try:
+ response = self.client.remove_container(container_id, v=volume_state, link=link, force=force)
+ except Exception as exc:
+ self.fail("Error removing container %s: %s" % (container_id, str(exc)))
+ return response
+
+ def container_update(self, container_id, update_parameters):
+ if update_parameters:
+ self.log("update container %s" % (container_id))
+ self.log(update_parameters, pretty_print=True)
+ self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
+ self.results['changed'] = True
+ if not self.check_mode and callable(getattr(self.client, 'update_container')):
+ try:
+ self.client.update_container(container_id, **update_parameters)
+ except Exception as exc:
+ self.fail("Error updating container %s: %s" % (container_id, str(exc)))
+ return self._get_container(container_id)
+
+ def container_kill(self, container_id):
+ self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ try:
+ if self.parameters.kill_signal:
+ response = self.client.kill(container_id, signal=self.parameters.kill_signal)
+ else:
+ response = self.client.kill(container_id)
+ except Exception as exc:
+ self.fail("Error killing container %s: %s" % (container_id, exc))
+ return response
+
+ def container_stop(self, container_id):
+ if self.parameters.force_kill:
+ self.container_kill(container_id)
+ return
+ self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ try:
+ if self.parameters.stop_timeout:
+ response = self.client.stop(container_id, timeout=self.parameters.stop_timeout)
+ else:
+ response = self.client.stop(container_id)
+ except Exception as exc:
+ self.fail("Error stopping container %s: %s" % (container_id, str(exc)))
+ return response
+
+
+def main():
+ argument_spec = dict(
+ blkio_weight=dict(type='int'),
+ capabilities=dict(type='list'),
+ cleanup=dict(type='bool', default=False),
+ command=dict(type='str'),
+ cpu_period=dict(type='int'),
+ cpu_quota=dict(type='int'),
+ cpuset_cpus=dict(type='str'),
+ cpuset_mems=dict(type='str'),
+ cpu_shares=dict(type='int'),
+ detach=dict(type='bool', default=True),
+ devices=dict(type='list'),
+ dns_servers=dict(type='list'),
+ dns_opts=dict(type='list'),
+ dns_search_domains=dict(type='list'),
+ env=dict(type='dict'),
+ env_file=dict(type='path'),
+ entrypoint=dict(type='str'),
+ etc_hosts=dict(type='dict'),
+ exposed_ports=dict(type='list', aliases=['exposed', 'expose']),
+ force_kill=dict(type='bool', default=False, aliases=['forcekill']),
+ groups=dict(type='list'),
+ hostname=dict(type='str'),
+ ignore_image=dict(type='bool', default=False),
+ image=dict(type='str'),
+ interactive=dict(type='bool', default=False),
+ ipc_mode=dict(type='str'),
+ keep_volumes=dict(type='bool', default=True),
+ kernel_memory=dict(type='str'),
+ kill_signal=dict(type='str'),
+ labels=dict(type='dict'),
+ links=dict(type='list'),
+ log_driver=dict(type='str',
+ choices=['none', 'json-file', 'syslog', 'journald', 'gelf', 'fluentd', 'awslogs', 'splunk'],
+ default=None),
+ log_options=dict(type='dict', aliases=['log_opt']),
+ mac_address=dict(type='str'),
+ memory=dict(type='str', default='0'),
+ memory_reservation=dict(type='str'),
+ memory_swap=dict(type='str'),
+ memory_swappiness=dict(type='int'),
+ name=dict(type='str', required=True),
+ network_mode=dict(type='str'),
+ networks=dict(type='list'),
+ oom_killer=dict(type='bool'),
+ oom_score_adj=dict(type='int'),
+ paused=dict(type='bool', default=False),
+ pid_mode=dict(type='str'),
+ privileged=dict(type='bool', default=False),
+ published_ports=dict(type='list', aliases=['ports']),
+ pull=dict(type='bool', default=False),
+ purge_networks=dict(type='bool', default=False),
+ read_only=dict(type='bool', default=False),
+ recreate=dict(type='bool', default=False),
+ restart=dict(type='bool', default=False),
+ restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']),
+ restart_retries=dict(type='int', default=None),
+ shm_size=dict(type='str'),
+ security_opts=dict(type='list'),
+ state=dict(type='str', choices=['absent', 'present', 'started', 'stopped'], default='started'),
+ stop_signal=dict(type='str'),
+ stop_timeout=dict(type='int'),
+ trust_image_content=dict(type='bool', default=False),
+ tty=dict(type='bool', default=False),
+ ulimits=dict(type='list'),
+ user=dict(type='str'),
+ uts=dict(type='str'),
+ volumes=dict(type='list'),
+ volumes_from=dict(type='list'),
+ volume_driver=dict(type='str'),
+ )
+
+ required_if = [
+ ('state', 'present', ['image'])
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True
+ )
+
+ cm = ContainerManager(client)
+ client.module.exit_json(**cm.results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main() \ No newline at end of file
diff --git a/roles/openshift_health_checker/library/docker_info.py b/roles/openshift_health_checker/library/docker_info.py
new file mode 100644
index 000000000..7f712bcff
--- /dev/null
+++ b/roles/openshift_health_checker/library/docker_info.py
@@ -0,0 +1,24 @@
+# pylint: disable=missing-docstring
+"""
+Ansible module for determining information about the docker host.
+
+While there are several ansible modules that make use of the docker
+api to expose container and image facts in a remote host, they
+are unable to return specific information about the host machine
+itself. This module exposes the same information obtained through
+executing the `docker info` command on a docker host, in json format.
+"""
+
+from ansible.module_utils.docker_common import AnsibleDockerClient
+
+
+def main():
+ client = AnsibleDockerClient()
+
+ client.module.exit_json(
+ info=client.info(),
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index 2c70438c9..be63d864a 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -2,13 +2,14 @@
Health checks for OpenShift clusters.
"""
+import operator
import os
+
from abc import ABCMeta, abstractmethod, abstractproperty
from importlib import import_module
-import operator
-import six
-from six.moves import reduce
+from ansible.module_utils import six
+from ansible.module_utils.six.moves import reduce # pylint: disable=import-error,redefined-builtin
class OpenShiftCheckException(Exception):
@@ -20,8 +21,13 @@ class OpenShiftCheckException(Exception):
class OpenShiftCheck(object):
"""A base class for defining checks for an OpenShift cluster environment."""
- def __init__(self, module_executor):
- self.module_executor = module_executor
+ def __init__(self, execute_module=None, module_executor=None):
+ if execute_module is module_executor is None:
+ raise TypeError(
+ "__init__() takes either execute_module (recommended) "
+ "or module_executor (deprecated), none given")
+ self.execute_module = execute_module or module_executor
+ self.module_executor = self.execute_module
@abstractproperty
def name(self):
@@ -57,12 +63,28 @@ class OpenShiftCheck(object):
yield subclass
+LOADER_EXCLUDES = (
+ "__init__.py",
+ "mixins.py",
+)
+
+
+def load_checks():
+ """Dynamically import all check modules for the side effect of registering checks."""
+ return [
+ import_module(__package__ + "." + name[:-3])
+ for name in os.listdir(os.path.dirname(__file__))
+ if name.endswith(".py") and name not in LOADER_EXCLUDES
+ ]
+
+
def get_var(task_vars, *keys, **kwargs):
"""Helper function to get deeply nested values from task_vars.
Ansible task_vars structures are Python dicts, often mapping strings to
other dicts. This helper makes it easier to get a nested value, raising
- OpenShiftCheckException when a key is not found.
+ OpenShiftCheckException when a key is not found or returning a default value
+ provided as a keyword argument.
"""
try:
value = reduce(operator.getitem, keys, task_vars)
@@ -71,15 +93,3 @@ def get_var(task_vars, *keys, **kwargs):
return kwargs["default"]
raise OpenShiftCheckException("'{}' is undefined".format(".".join(map(str, keys))))
return value
-
-
-# Dynamically import all submodules for the side effect of loading checks.
-
-EXCLUDES = (
- "__init__.py",
- "mixins.py",
-)
-
-for name in os.listdir(os.path.dirname(__file__)):
- if name.endswith(".py") and name not in EXCLUDES:
- import_module(__package__ + "." + name[:-3])
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
new file mode 100644
index 000000000..cce289b95
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -0,0 +1,179 @@
+# pylint: disable=missing-docstring
+from openshift_checks import OpenShiftCheck, get_var
+
+
+class DockerImageAvailability(OpenShiftCheck):
+ """Check that required Docker images are available.
+
+ This check attempts to ensure that required docker images are
+ either present locally, or able to be pulled down from available
+ registries defined in a host machine.
+ """
+
+ name = "docker_image_availability"
+ tags = ["preflight"]
+
+ skopeo_image = "openshift/openshift-ansible"
+
+ # FIXME(juanvallejo): we should consider other possible values of
+ # `deployment_type` (the key here). See
+ # https://github.com/openshift/openshift-ansible/blob/8e26f8c/roles/openshift_repos/vars/main.yml#L7
+ docker_image_base = {
+ "origin": {
+ "repo": "openshift",
+ "image": "origin",
+ },
+ "openshift-enterprise": {
+ "repo": "openshift3",
+ "image": "ose",
+ },
+ }
+
+ def run(self, tmp, task_vars):
+ required_images = self.required_images(task_vars)
+ missing_images = set(required_images) - set(self.local_images(required_images, task_vars))
+
+ # exit early if all images were found locally
+ if not missing_images:
+ return {"changed": False}
+
+ msg, failed, changed = self.update_skopeo_image(task_vars)
+
+ # exit early if Skopeo update fails
+ if failed:
+ return {
+ "failed": True,
+ "changed": changed,
+ "msg": "Failed to update Skopeo image ({img_name}). {msg}".format(img_name=self.skopeo_image, msg=msg),
+ }
+
+ registries = self.known_docker_registries(task_vars)
+ available_images = self.available_images(missing_images, registries, task_vars)
+ unavailable_images = set(missing_images) - set(available_images)
+
+ if unavailable_images:
+ return {
+ "failed": True,
+ "msg": (
+ "One or more required images are not available: {}.\n"
+ "Configured registries: {}"
+ ).format(", ".join(sorted(unavailable_images)), ", ".join(registries)),
+ "changed": changed,
+ }
+
+ return {"changed": changed}
+
+ def required_images(self, task_vars):
+ deployment_type = get_var(task_vars, "deployment_type")
+ # FIXME(juanvallejo): we should handle gracefully with a proper error
+ # message when given an unexpected value for `deployment_type`.
+ image_base_name = self.docker_image_base[deployment_type]
+
+ openshift_release = get_var(task_vars, "openshift_release")
+ # FIXME(juanvallejo): this variable is not required when the
+ # installation is non-containerized. The example inventories have it
+ # commented out. We should handle gracefully and with a proper error
+ # message when this variable is required and not set.
+ openshift_image_tag = get_var(task_vars, "openshift_image_tag")
+
+ is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
+
+ if is_containerized:
+ images = set(self.containerized_docker_images(image_base_name, openshift_release))
+ else:
+ images = set(self.rpm_docker_images(image_base_name, openshift_release))
+
+ # append images with qualified image tags to our list of required images.
+ # these are images with a (v0.0.0.0) tag, rather than a standard release
+ # format tag (v0.0). We want to check this set in both containerized and
+ # non-containerized installations.
+ images.update(
+ self.qualified_docker_images(self.image_from_base_name(image_base_name), "v" + openshift_image_tag)
+ )
+
+ return images
+
+ def local_images(self, images, task_vars):
+ """Filter a list of images and return those available locally."""
+ return [
+ image for image in images
+ if self.is_image_local(image, task_vars)
+ ]
+
+ def is_image_local(self, image, task_vars):
+ result = self.module_executor("docker_image_facts", {"name": image}, task_vars)
+ if result.get("failed", False):
+ return False
+
+ return bool(result.get("images", []))
+
+ def known_docker_registries(self, task_vars):
+ result = self.module_executor("docker_info", {}, task_vars)
+
+ if result.get("failed", False):
+ return []
+
+ # FIXME(juanvallejo): wrong default type, result["info"] is expected to
+ # contain a dictionary (see how we call `docker_info.get` below).
+ docker_info = result.get("info", "")
+ return [registry.get("Name", "") for registry in docker_info.get("Registries", {})]
+
+ def available_images(self, images, registries, task_vars):
+ """Inspect existing images using Skopeo and return all images successfully inspected."""
+ return [
+ image for image in images
+ if self.is_image_available(image, registries, task_vars)
+ ]
+
+ def is_image_available(self, image, registries, task_vars):
+ for registry in registries:
+ if self.is_available_skopeo_image(image, registry, task_vars):
+ return True
+
+ return False
+
+ def is_available_skopeo_image(self, image, registry, task_vars):
+ """Uses Skopeo to determine if required image exists in a given registry."""
+
+ cmd_str = "skopeo inspect docker://{registry}/{image}".format(
+ registry=registry,
+ image=image,
+ )
+
+ args = {
+ "name": "skopeo_inspect",
+ "image": self.skopeo_image,
+ "command": cmd_str,
+ "detach": False,
+ "cleanup": True,
+ }
+ result = self.module_executor("docker_container", args, task_vars)
+ return result.get("failed", False)
+
+ def containerized_docker_images(self, base_name, version):
+ return [
+ "{image}:{version}".format(image=self.image_from_base_name(base_name), version=version)
+ ]
+
+ @staticmethod
+ def rpm_docker_images(base, version):
+ return [
+ "{image_repo}/registry-console:{version}".format(image_repo=base["repo"], version=version)
+ ]
+
+ @staticmethod
+ def qualified_docker_images(image_name, version):
+ return [
+ "{}-{}:{}".format(image_name, component, version)
+ for component in "haproxy-router docker-registry deployer pod".split()
+ ]
+
+ @staticmethod
+ def image_from_base_name(base):
+ return "".join([base["repo"], "/", base["image"]])
+
+ # ensures that the skopeo docker image exists, and updates it
+ # with latest if image was already present locally.
+ def update_skopeo_image(self, task_vars):
+ result = self.module_executor("docker_image", {"name": self.skopeo_image}, task_vars)
+ return result.get("msg", ""), result.get("failed", False), result.get("changed", False)
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index 771123d61..9891972a6 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -21,7 +21,7 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
packages.update(self.node_packages(rpm_prefix))
args = {"packages": sorted(set(packages))}
- return self.module_executor("check_yum_update", args, tmp, task_vars)
+ return self.execute_module("check_yum_update", args, tmp, task_vars)
@staticmethod
def master_packages(rpm_prefix):
diff --git a/roles/openshift_health_checker/openshift_checks/package_update.py b/roles/openshift_health_checker/openshift_checks/package_update.py
index c5a226954..fd0c0a755 100644
--- a/roles/openshift_health_checker/openshift_checks/package_update.py
+++ b/roles/openshift_health_checker/openshift_checks/package_update.py
@@ -11,4 +11,4 @@ class PackageUpdate(NotContainerizedMixin, OpenShiftCheck):
def run(self, tmp, task_vars):
args = {"packages": []}
- return self.module_executor("check_yum_update", args, tmp, task_vars)
+ return self.execute_module("check_yum_update", args, tmp, task_vars)
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 2e9d07deb..42193a1c6 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -17,4 +17,4 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
"prefix": rpm_prefix,
"version": openshift_release,
}
- return self.module_executor("aos_version", args, tmp, task_vars)
+ return self.execute_module("aos_version", args, tmp, task_vars)
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
new file mode 100644
index 000000000..a877246f4
--- /dev/null
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -0,0 +1,227 @@
+import pytest
+
+from openshift_health_check import ActionModule, resolve_checks
+from openshift_checks import OpenShiftCheckException
+
+
+def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None):
+ """Returns a new class that is compatible with OpenShiftCheck for testing."""
+
+ _name, _tags = name, tags
+
+ class FakeCheck(object):
+ name = _name
+ tags = _tags or []
+
+ def __init__(self, execute_module=None):
+ pass
+
+ @classmethod
+ def is_active(cls, task_vars):
+ return is_active
+
+ def run(self, tmp, task_vars):
+ if run_exception is not None:
+ raise run_exception
+ return run_return
+
+ return FakeCheck
+
+
+# Fixtures
+
+
+@pytest.fixture
+def plugin():
+ task = FakeTask('openshift_health_check', {'checks': ['fake_check']})
+ plugin = ActionModule(task, None, None, None, None, None)
+ return plugin
+
+
+class FakeTask(object):
+ def __init__(self, action, args):
+ self.action = action
+ self.args = args
+ self.async = 0
+
+
+@pytest.fixture
+def task_vars():
+ return dict(openshift=dict(), ansible_host='unit-test-host')
+
+
+# Assertion helpers
+
+
+def failed(result, msg_has=None):
+ if msg_has is not None:
+ assert 'msg' in result
+ for term in msg_has:
+ assert term in result['msg']
+ return result.get('failed', False)
+
+
+def changed(result):
+ return result.get('changed', False)
+
+
+def skipped(result):
+ return result.get('skipped', False)
+
+
+# Tests
+
+
+@pytest.mark.parametrize('task_vars', [
+ None,
+ {},
+])
+def test_action_plugin_missing_openshift_facts(plugin, task_vars):
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert failed(result, msg_has=['openshift_facts'])
+
+
+def test_action_plugin_cannot_load_checks_with_the_same_name(plugin, task_vars, monkeypatch):
+ FakeCheck1 = fake_check('duplicate_name')
+ FakeCheck2 = fake_check('duplicate_name')
+ checks = [FakeCheck1, FakeCheck2]
+ monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
+
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert failed(result, msg_has=['unique', 'duplicate_name', 'FakeCheck'])
+
+
+def test_action_plugin_skip_non_active_checks(plugin, task_vars, monkeypatch):
+ checks = [fake_check(is_active=False)]
+ monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
+
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert result['checks']['fake_check'] == {'skipped': True}
+ assert not failed(result)
+ assert not changed(result)
+ assert not skipped(result)
+
+
+def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
+ check_return_value = {'ok': 'test'}
+ check_class = fake_check(run_return=check_return_value)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert result['checks']['fake_check'] == check_return_value
+ assert not failed(result)
+ assert not changed(result)
+ assert not skipped(result)
+
+
+def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
+ check_return_value = {'ok': 'test', 'changed': True}
+ check_class = fake_check(run_return=check_return_value)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert result['checks']['fake_check'] == check_return_value
+ assert not failed(result)
+ assert changed(result)
+ assert not skipped(result)
+
+
+def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
+ check_return_value = {'failed': True}
+ check_class = fake_check(run_return=check_return_value)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert result['checks']['fake_check'] == check_return_value
+ assert failed(result, msg_has=['failed'])
+ assert not changed(result)
+ assert not skipped(result)
+
+
+def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch):
+ exception_msg = 'fake check has an exception'
+ run_exception = OpenShiftCheckException(exception_msg)
+ check_class = fake_check(run_exception=run_exception)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert failed(result['checks']['fake_check'], msg_has=exception_msg)
+ assert failed(result, msg_has=['failed'])
+ assert not changed(result)
+ assert not skipped(result)
+
+
+@pytest.mark.parametrize('names,all_checks,expected', [
+ ([], [], set()),
+ (
+ ['a', 'b'],
+ [
+ fake_check('a'),
+ fake_check('b'),
+ ],
+ set(['a', 'b']),
+ ),
+ (
+ ['a', 'b', '@group'],
+ [
+ fake_check('from_group_1', ['group', 'another_group']),
+ fake_check('not_in_group', ['another_group']),
+ fake_check('from_group_2', ['preflight', 'group']),
+ fake_check('a'),
+ fake_check('b'),
+ ],
+ set(['a', 'b', 'from_group_1', 'from_group_2']),
+ ),
+])
+def test_resolve_checks_ok(names, all_checks, expected):
+ assert resolve_checks(names, all_checks) == expected
+
+
+@pytest.mark.parametrize('names,all_checks,words_in_exception,words_not_in_exception', [
+ (
+ ['testA', 'testB'],
+ [],
+ ['check', 'name', 'testA', 'testB'],
+ ['tag', 'group', '@'],
+ ),
+ (
+ ['@group'],
+ [],
+ ['tag', 'name', 'group'],
+ ['check', '@'],
+ ),
+ (
+ ['testA', 'testB', '@group'],
+ [],
+ ['check', 'name', 'testA', 'testB', 'tag', 'group'],
+ ['@'],
+ ),
+ (
+ ['testA', 'testB', '@group'],
+ [
+ fake_check('from_group_1', ['group', 'another_group']),
+ fake_check('not_in_group', ['another_group']),
+ fake_check('from_group_2', ['preflight', 'group']),
+ ],
+ ['check', 'name', 'testA', 'testB'],
+ ['tag', 'group', '@'],
+ ),
+])
+def test_resolve_checks_failure(names, all_checks, words_in_exception, words_not_in_exception):
+ with pytest.raises(Exception) as excinfo:
+ resolve_checks(names, all_checks)
+ for word in words_in_exception:
+ assert word in str(excinfo.value)
+ for word in words_not_in_exception:
+ assert word not in str(excinfo.value)
diff --git a/roles/openshift_health_checker/test/conftest.py b/roles/openshift_health_checker/test/conftest.py
new file mode 100644
index 000000000..d16401260
--- /dev/null
+++ b/roles/openshift_health_checker/test/conftest.py
@@ -0,0 +1,10 @@
+import os
+import sys
+
+# extend sys.path so that tests can import openshift_checks and action plugins
+# from this role.
+openshift_health_checker_path = os.path.dirname(os.path.dirname(__file__))
+sys.path[1:1] = [
+ openshift_health_checker_path,
+ os.path.join(openshift_health_checker_path, 'action_plugins')
+]
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
new file mode 100644
index 000000000..2a9c32f77
--- /dev/null
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -0,0 +1,28 @@
+import pytest
+
+from openshift_checks.docker_image_availability import DockerImageAvailability
+
+
+@pytest.mark.xfail(strict=True) # TODO: remove this once this test is fully implemented.
+@pytest.mark.parametrize('task_vars,expected_result', [
+ (
+ dict(
+ openshift=dict(common=dict(
+ service_type='origin',
+ is_containerized=False,
+ )),
+ openshift_release='v3.5',
+ deployment_type='origin',
+ openshift_image_tag='', # FIXME: should not be required
+ ),
+ {'changed': False},
+ ),
+ # TODO: add more parameters here to test the multiple possible inputs that affect behavior.
+])
+def test_docker_image_availability(task_vars, expected_result):
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ return {'info': {}} # TODO: this will vary depending on input parameters.
+
+ check = DockerImageAvailability(execute_module=execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+ assert result == expected_result
diff --git a/roles/openshift_health_checker/test/mixins_test.py b/roles/openshift_health_checker/test/mixins_test.py
new file mode 100644
index 000000000..2d83e207d
--- /dev/null
+++ b/roles/openshift_health_checker/test/mixins_test.py
@@ -0,0 +1,23 @@
+import pytest
+
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
+from openshift_checks.mixins import NotContainerizedMixin
+
+
+class NotContainerizedCheck(NotContainerizedMixin, OpenShiftCheck):
+ name = "not_containerized"
+ run = NotImplemented
+
+
+@pytest.mark.parametrize('task_vars,expected', [
+ (dict(openshift=dict(common=dict(is_containerized=False))), True),
+ (dict(openshift=dict(common=dict(is_containerized=True))), False),
+])
+def test_is_active(task_vars, expected):
+ assert NotContainerizedCheck.is_active(task_vars) == expected
+
+
+def test_is_active_missing_task_vars():
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ NotContainerizedCheck.is_active(task_vars={})
+ assert 'is_containerized' in str(excinfo.value)
diff --git a/roles/openshift_health_checker/test/openshift_check_test.py b/roles/openshift_health_checker/test/openshift_check_test.py
new file mode 100644
index 000000000..e3153979c
--- /dev/null
+++ b/roles/openshift_health_checker/test/openshift_check_test.py
@@ -0,0 +1,99 @@
+import pytest
+
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
+from openshift_checks import load_checks, get_var
+
+
+# Fixtures
+
+
+@pytest.fixture()
+def task_vars():
+ return dict(foo=42, bar=dict(baz="openshift"))
+
+
+@pytest.fixture(params=[
+ ("notfound",),
+ ("multiple", "keys", "not", "in", "task_vars"),
+])
+def missing_keys(request):
+ return request.param
+
+
+# Tests
+
+
+def test_OpenShiftCheck_init():
+ class TestCheck(OpenShiftCheck):
+ name = "test_check"
+ run = NotImplemented
+
+ # initialization requires at least one argument (apart from self)
+ with pytest.raises(TypeError) as excinfo:
+ TestCheck()
+ assert 'execute_module' in str(excinfo.value)
+ assert 'module_executor' in str(excinfo.value)
+
+ execute_module = object()
+
+ # initialize with positional argument
+ check = TestCheck(execute_module)
+ # new recommended name
+ assert check.execute_module == execute_module
+ # deprecated attribute name
+ assert check.module_executor == execute_module
+
+ # initialize with keyword argument, recommended name
+ check = TestCheck(execute_module=execute_module)
+ # new recommended name
+ assert check.execute_module == execute_module
+ # deprecated attribute name
+ assert check.module_executor == execute_module
+
+ # initialize with keyword argument, deprecated name
+ check = TestCheck(module_executor=execute_module)
+ # new recommended name
+ assert check.execute_module == execute_module
+ # deprecated attribute name
+ assert check.module_executor == execute_module
+
+
+def test_subclasses():
+ """OpenShiftCheck.subclasses should find all subclasses recursively."""
+ class TestCheck1(OpenShiftCheck):
+ pass
+
+ class TestCheck2(OpenShiftCheck):
+ pass
+
+ class TestCheck1A(TestCheck1):
+ pass
+
+ local_subclasses = set([TestCheck1, TestCheck1A, TestCheck2])
+ known_subclasses = set(OpenShiftCheck.subclasses())
+
+ assert local_subclasses - known_subclasses == set(), "local_subclasses should be a subset of known_subclasses"
+
+
+def test_load_checks():
+ """Loading checks should load and return Python modules."""
+ modules = load_checks()
+ assert modules
+
+
+@pytest.mark.parametrize("keys,expected", [
+ (("foo",), 42),
+ (("bar", "baz"), "openshift"),
+])
+def test_get_var_ok(task_vars, keys, expected):
+ assert get_var(task_vars, *keys) == expected
+
+
+def test_get_var_error(task_vars, missing_keys):
+ with pytest.raises(OpenShiftCheckException):
+ get_var(task_vars, *missing_keys)
+
+
+def test_get_var_default(task_vars, missing_keys):
+ default = object()
+ assert get_var(task_vars, *missing_keys, default=default) == default
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
new file mode 100644
index 000000000..25385339a
--- /dev/null
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -0,0 +1,49 @@
+import pytest
+
+from openshift_checks.package_availability import PackageAvailability
+
+
+@pytest.mark.parametrize('task_vars,must_have_packages,must_not_have_packages', [
+ (
+ dict(openshift=dict(common=dict(service_type='openshift'))),
+ set(),
+ set(['openshift-master', 'openshift-node']),
+ ),
+ (
+ dict(
+ openshift=dict(common=dict(service_type='origin')),
+ group_names=['masters'],
+ ),
+ set(['origin-master']),
+ set(['origin-node']),
+ ),
+ (
+ dict(
+ openshift=dict(common=dict(service_type='atomic-openshift')),
+ group_names=['nodes'],
+ ),
+ set(['atomic-openshift-node']),
+ set(['atomic-openshift-master']),
+ ),
+ (
+ dict(
+ openshift=dict(common=dict(service_type='atomic-openshift')),
+ group_names=['masters', 'nodes'],
+ ),
+ set(['atomic-openshift-master', 'atomic-openshift-node']),
+ set(),
+ ),
+])
+def test_package_availability(task_vars, must_have_packages, must_not_have_packages):
+ return_value = object()
+
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ assert module_name == 'check_yum_update'
+ assert 'packages' in module_args
+ assert set(module_args['packages']).issuperset(must_have_packages)
+ assert not set(module_args['packages']).intersection(must_not_have_packages)
+ return return_value
+
+ check = PackageAvailability(execute_module=execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+ assert result is return_value
diff --git a/roles/openshift_health_checker/test/package_update_test.py b/roles/openshift_health_checker/test/package_update_test.py
new file mode 100644
index 000000000..5e000cff5
--- /dev/null
+++ b/roles/openshift_health_checker/test/package_update_test.py
@@ -0,0 +1,16 @@
+from openshift_checks.package_update import PackageUpdate
+
+
+def test_package_update():
+ return_value = object()
+
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ assert module_name == 'check_yum_update'
+ assert 'packages' in module_args
+ # empty list of packages means "generic check if 'yum update' will work"
+ assert module_args['packages'] == []
+ return return_value
+
+ check = PackageUpdate(execute_module=execute_module)
+ result = check.run(tmp=None, task_vars=None)
+ assert result is return_value
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
new file mode 100644
index 000000000..cc1d263bc
--- /dev/null
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -0,0 +1,21 @@
+from openshift_checks.package_version import PackageVersion
+
+
+def test_package_version():
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_release='v3.5',
+ )
+ return_value = object()
+
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ assert module_name == 'aos_version'
+ assert 'prefix' in module_args
+ assert 'version' in module_args
+ assert module_args['prefix'] == task_vars['openshift']['common']['service_type']
+ assert module_args['version'] == task_vars['openshift_release']
+ return return_value
+
+ check = PackageVersion(execute_module=execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+ assert result is return_value
diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md
index 328f800bf..6d576df71 100644
--- a/roles/openshift_hosted/README.md
+++ b/roles/openshift_hosted/README.md
@@ -26,6 +26,7 @@ From this role:
| openshift_hosted_registry_registryurl | 'openshift3/ose-${component}:${version}' | The image to base the OpenShift registry on. |
| openshift_hosted_registry_replicas | Number of nodes matching selector | The number of replicas to configure. |
| openshift_hosted_registry_selector | region=infra | Node selector used when creating registry. The OpenShift registry will only be deployed to nodes matching this selector. |
+| openshift_hosted_registry_cert_expire_days | `730` (2 years) | Validity of the certificates in days. Works only with OpenShift version 1.5 (3.5) and later. |
Dependencies
------------
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index 0a6299c9b..596b36239 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -14,17 +14,19 @@ openshift_hosted_router_edits:
openshift_hosted_routers:
- name: router
- replicas: "{{ replicas }}"
+ replicas: "{{ replicas | default(1) }}"
namespace: default
serviceaccount: router
- selector: "{{ openshift_hosted_router_selector }}"
- images: "{{ openshift_hosted_router_image }}"
+ selector: "{{ openshift_hosted_router_selector | default(None) }}"
+ images: "{{ openshift_hosted_router_image | default(None) }}"
edits: "{{ openshift_hosted_router_edits }}"
stats_port: 1936
ports:
- 80:80
- 443:443
- certificates: "{{ openshift_hosted_router_certificate | default({}) }}"
+ certificates: "{{ openshift_hosted_router_certificates | default({}) }}"
openshift_hosted_router_certificates: {}
+openshift_hosted_registry_cert_expire_days: 730
+openshift_hosted_router_create_certificate: False
diff --git a/roles/openshift_hosted/filter_plugins/filters.py b/roles/openshift_hosted/filter_plugins/filters.py
index cbfadfe9d..7f41529ac 100644
--- a/roles/openshift_hosted/filter_plugins/filters.py
+++ b/roles/openshift_hosted/filter_plugins/filters.py
@@ -21,14 +21,21 @@ class FilterModule(object):
if replicas is not None:
return replicas
+ replicas = 1
+
+ # Ignore boolean expression limit of 5.
+ # pylint: disable=too-many-boolean-expressions
if (isinstance(router_nodes, dict) and
'results' in router_nodes and
'results' in router_nodes['results'] and
- 'items' in router_nodes['results']['results']):
+ isinstance(router_nodes['results']['results'], list) and
+ len(router_nodes['results']['results']) > 0 and
+ 'items' in router_nodes['results']['results'][0]):
- return len(router_nodes['results']['results'][0]['items'])
+ if len(router_nodes['results']['results'][0]['items']) > 0:
+ replicas = len(router_nodes['results']['results'][0]['items'])
- return 1
+ return replicas
def filters(self):
''' returns a mapping of filters to methods '''
diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml
index e9b590550..9626c23c1 100644
--- a/roles/openshift_hosted/meta/main.yml
+++ b/roles/openshift_hosted/meta/main.yml
@@ -15,21 +15,3 @@ dependencies:
- role: openshift_cli
- role: openshift_hosted_facts
- role: lib_openshift
-- role: openshift_projects
- openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts({'default':{'default_node_selector':''},'openshift-infra':{'default_node_selector':''},'logging':{'default_node_selector':''}}) }}"
-- role: openshift_serviceaccounts
- openshift_serviceaccounts_names:
- - router
- - registry
- openshift_serviceaccounts_namespace: default
- openshift_serviceaccounts_sccs:
- - hostnetwork
- when: openshift.common.version_gte_3_2_or_1_2
-- role: openshift_serviceaccounts
- openshift_serviceaccounts_names:
- - router
- - registry
- openshift_serviceaccounts_namespace: default
- openshift_serviceaccounts_sccs:
- - privileged
- when: not openshift.common.version_gte_3_2_or_1_2
diff --git a/roles/openshift_hosted/tasks/main.yml b/roles/openshift_hosted/tasks/main.yml
index fe254f72d..6efe2f63c 100644
--- a/roles/openshift_hosted/tasks/main.yml
+++ b/roles/openshift_hosted/tasks/main.yml
@@ -1,4 +1,11 @@
---
+- name: Create projects
+ oc_project:
+ name: "{{ item.key }}"
+ node_selector:
+ - "{{ item.value.default_node_selector }}"
+ with_dict: "{{ openshift_projects }}"
+
- include: router/router.yml
when: openshift_hosted_manage_router | default(true) | bool
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index d89ce855a..0b8042473 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -56,12 +56,24 @@
openshift_hosted_registry_force:
- False
+- name: Create the registry service account
+ oc_serviceaccount:
+ name: "{{ openshift_hosted_registry_serviceaccount }}"
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+
+- name: Grant the registry serivce account access to the appropriate scc
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ openshift_hosted_registry_namespace }}:{{ openshift_hosted_registry_serviceaccount }}"
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ resource_kind: scc
+ resource_name: hostnetwork
+
- name: oc adm policy add-cluster-role-to-user system:registry system:serviceaccount:default:registry
oc_adm_policy_user:
- user: system:serviceaccount:default:registry
+ user: "system:serviceaccount:{{ openshift_hosted_registry_namespace }}:{{ openshift_hosted_registry_serviceaccount }}"
+ namespace: "{{ openshift_hosted_registry_namespace }}"
resource_kind: cluster-role
resource_name: system:registry
- state: present
- name: create the default registry service
oc_service:
diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml
index f9ea2ebeb..29c164f52 100644
--- a/roles/openshift_hosted/tasks/registry/secure.yml
+++ b/roles/openshift_hosted/tasks/registry/secure.yml
@@ -53,10 +53,12 @@
signer_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
hostnames:
- "{{ docker_registry_service_ip.results.clusterip }}"
- - docker-registry.default.svc.cluster.local
+ - "{{ openshift_hosted_registry_name }}.default.svc"
+ - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift.common.dns_domain }}"
- "{{ docker_registry_route_hostname }}"
cert: "{{ openshift_master_config_dir }}/registry.crt"
key: "{{ openshift_master_config_dir }}/registry.key"
+ expire_days: "{{ openshift_hosted_registry_cert_expire_days if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool else omit }}"
register: server_cert_out
- name: Create the secret for the registry certificates
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index 3b7021eae..c71d0a34f 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -14,6 +14,31 @@
openshift_hosted_router_selector: "{{ openshift.hosted.router.selector | default(None) }}"
openshift_hosted_router_image: "{{ openshift.hosted.router.registryurl }}"
+# This is for when we desire a cluster signed cert
+# The certificate is generated and placed in master_config_dir/
+- block:
+ - name: generate a default wildcard router certificate
+ oc_adm_ca_server_cert:
+ signer_cert: "{{ openshift_master_config_dir }}/ca.crt"
+ signer_key: "{{ openshift_master_config_dir }}/ca.key"
+ signer_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
+ hostnames:
+ - "{{ openshift_master_default_subdomain }}"
+ - "*.{{ openshift_master_default_subdomain }}"
+ cert: "{{ ('/etc/origin/master/' ~ (item.certificates.certfile | basename)) if 'certfile' in item.certificates else ((openshift_master_config_dir) ~ '/openshift-router.crt') }}"
+ key: "{{ ('/etc/origin/master/' ~ (item.certificates.keyfile | basename)) if 'keyfile' in item.certificates else ((openshift_master_config_dir) ~ '/openshift-router.key') }}"
+ with_items: "{{ openshift_hosted_routers }}"
+
+ - name: set the openshift_hosted_router_certificates
+ set_fact:
+ openshift_hosted_router_certificates:
+ certfile: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}"
+ keyfile: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}"
+ cafile: "{{ openshift_master_config_dir ~ '/ca.crt' }}"
+
+ # End Block
+ when: openshift_hosted_router_create_certificate
+
- name: Get the certificate contents for router
copy:
backup: True
@@ -21,6 +46,31 @@
src: "{{ item }}"
with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificates') |
oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"
+ when: not openshift_hosted_router_create_certificate
+
+- name: Create the router service account(s)
+ oc_serviceaccount:
+ name: "{{ item.serviceaccount }}"
+ namespace: "{{ item.namespace }}"
+ state: present
+ with_items: "{{ openshift_hosted_routers }}"
+
+- name: Grant the router serivce account(s) access to the appropriate scc
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ item.namespace }}:{{ item.serviceaccount }}"
+ namespace: "{{ item.namespace }}"
+ resource_kind: scc
+ resource_name: hostnetwork
+ with_items: "{{ openshift_hosted_routers }}"
+
+- name: Set additional permissions for router service account
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ item.namespace }}:{{ item.serviceaccount }}"
+ namespace: "{{ item.namespace }}"
+ resource_kind: cluster-role
+ resource_name: cluster-reader
+ when: item.namespace == 'default'
+ with_items: "{{ openshift_hosted_routers }}"
- name: Create OpenShift router
oc_adm_router:
diff --git a/roles/openshift_hosted/templates/registry_config.j2 b/roles/openshift_hosted/templates/registry_config.j2
index f3336334a..ca6a23f21 100644
--- a/roles/openshift_hosted/templates/registry_config.j2
+++ b/roles/openshift_hosted/templates/registry_config.j2
@@ -71,7 +71,7 @@ middleware:
- name: openshift
options:
pullthrough: {{ openshift_hosted_registry_pullthrough | default(true) }}
- acceptschema2: {{ openshift_hosted_registry_acceptschema2 | default(false) }}
+ acceptschema2: {{ openshift_hosted_registry_acceptschema2 | default(true) }}
enforcequota: {{ openshift_hosted_registry_enforcequota | default(false) }}
{% if openshift_hosted_registry_storage_provider | default('') == 's3' and openshift_hosted_registry_storage_s3_cloudfront_baseurl is defined %}
storage:
diff --git a/roles/openshift_hosted/vars/main.yml b/roles/openshift_hosted/vars/main.yml
index 521578cd0..0821d0e7e 100644
--- a/roles/openshift_hosted/vars/main.yml
+++ b/roles/openshift_hosted/vars/main.yml
@@ -1,3 +1,13 @@
---
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
registry_config_secret_name: registry-config
+
+openshift_default_projects:
+ default:
+ default_node_selector: ''
+ logging:
+ default_node_selector: ''
+ openshift-infra:
+ default_node_selector: ''
+
+openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts(openshift_default_projects) }}"
diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml
index c67058696..fdfc285ca 100644
--- a/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml
@@ -81,6 +81,7 @@ items:
metadata:
name: logging-deployer-edit-role
roleRef:
+ kind: ClusterRole
name: edit
subjects:
- kind: ServiceAccount
@@ -91,6 +92,7 @@ items:
metadata:
name: logging-deployer-dsadmin-role
roleRef:
+ kind: ClusterRole
name: daemonset-admin
subjects:
- kind: ServiceAccount
@@ -101,6 +103,7 @@ items:
metadata:
name: logging-elasticsearch-view-role
roleRef:
+ kind: ClusterRole
name: view
subjects:
- kind: ServiceAccount
@@ -217,11 +220,11 @@ items:
name: MODE
value: "install"
-
- description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.4.0", set prefix "registry.access.redhat.com/openshift3/"'
+ description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
value: "registry.access.redhat.com/openshift3/"
-
- description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.4.0", set version "3.4.0"'
+ description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"'
name: IMAGE_VERSION
value: "3.4.0"
-
diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml
index 6ead122c5..c4ab794ae 100644
--- a/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml
@@ -105,7 +105,7 @@ parameters:
-
description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
name: IMAGE_VERSION
- value: "3.4.0"
+ value: "v3.5"
-
description: "Internal URL for the master, for authentication retrieval"
name: MASTER_URL
diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml
index bc8c79ca1..5b5503500 100644
--- a/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml
@@ -81,7 +81,6 @@ items:
metadata:
name: logging-deployer-edit-role
roleRef:
- kind: ClusterRole
name: edit
subjects:
- kind: ServiceAccount
@@ -92,7 +91,6 @@ items:
metadata:
name: logging-deployer-dsadmin-role
roleRef:
- kind: ClusterRole
name: daemonset-admin
subjects:
- kind: ServiceAccount
@@ -103,7 +101,6 @@ items:
metadata:
name: logging-elasticsearch-view-role
roleRef:
- kind: ClusterRole
name: view
subjects:
- kind: ServiceAccount
diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml
new file mode 100644
index 000000000..fdfc285ca
--- /dev/null
+++ b/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml
@@ -0,0 +1,345 @@
+apiVersion: "v1"
+kind: "List"
+items:
+-
+ apiVersion: "v1"
+ kind: "Template"
+ metadata:
+ name: logging-deployer-account-template
+ annotations:
+ description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
+ tags: "infrastructure"
+ objects:
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ name: logging-deployer
+ metadata:
+ name: logging-deployer
+ labels:
+ logging-infra: deployer
+ provider: openshift
+ component: deployer
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-kibana
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-elasticsearch
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-fluentd
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-curator
+ - apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: oauth-editor
+ rules:
+ - resources:
+ - oauthclients
+ verbs:
+ - create
+ - delete
+ - apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: daemonset-admin
+ rules:
+ - resources:
+ - daemonsets
+ apiGroups:
+ - extensions
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - delete
+ - update
+ - apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: rolebinding-reader
+ rules:
+ - resources:
+ - clusterrolebindings
+ verbs:
+ - get
+ -
+ apiVersion: v1
+ kind: RoleBinding
+ metadata:
+ name: logging-deployer-edit-role
+ roleRef:
+ kind: ClusterRole
+ name: edit
+ subjects:
+ - kind: ServiceAccount
+ name: logging-deployer
+ -
+ apiVersion: v1
+ kind: RoleBinding
+ metadata:
+ name: logging-deployer-dsadmin-role
+ roleRef:
+ kind: ClusterRole
+ name: daemonset-admin
+ subjects:
+ - kind: ServiceAccount
+ name: logging-deployer
+ -
+ apiVersion: v1
+ kind: RoleBinding
+ metadata:
+ name: logging-elasticsearch-view-role
+ roleRef:
+ kind: ClusterRole
+ name: view
+ subjects:
+ - kind: ServiceAccount
+ name: aggregated-logging-elasticsearch
+-
+ apiVersion: "v1"
+ kind: "Template"
+ metadata:
+ name: logging-deployer-template
+ annotations:
+ description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
+ tags: "infrastructure"
+ labels:
+ logging-infra: deployer
+ provider: openshift
+ objects:
+ -
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: logging-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}logging-deployer:${IMAGE_VERSION}
+ imagePullPolicy: Always
+ name: deployer
+ volumeMounts:
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: IMAGE_PULL_SECRET
+ value: ${IMAGE_PULL_SECRET}
+ - name: INSECURE_REGISTRY
+ value: ${INSECURE_REGISTRY}
+ - name: ENABLE_OPS_CLUSTER
+ value: ${ENABLE_OPS_CLUSTER}
+ - name: KIBANA_HOSTNAME
+ value: ${KIBANA_HOSTNAME}
+ - name: KIBANA_OPS_HOSTNAME
+ value: ${KIBANA_OPS_HOSTNAME}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: ES_INSTANCE_RAM
+ value: ${ES_INSTANCE_RAM}
+ - name: ES_PVC_SIZE
+ value: ${ES_PVC_SIZE}
+ - name: ES_PVC_PREFIX
+ value: ${ES_PVC_PREFIX}
+ - name: ES_PVC_DYNAMIC
+ value: ${ES_PVC_DYNAMIC}
+ - name: ES_CLUSTER_SIZE
+ value: ${ES_CLUSTER_SIZE}
+ - name: ES_NODE_QUORUM
+ value: ${ES_NODE_QUORUM}
+ - name: ES_RECOVER_AFTER_NODES
+ value: ${ES_RECOVER_AFTER_NODES}
+ - name: ES_RECOVER_EXPECTED_NODES
+ value: ${ES_RECOVER_EXPECTED_NODES}
+ - name: ES_RECOVER_AFTER_TIME
+ value: ${ES_RECOVER_AFTER_TIME}
+ - name: ES_OPS_INSTANCE_RAM
+ value: ${ES_OPS_INSTANCE_RAM}
+ - name: ES_OPS_PVC_SIZE
+ value: ${ES_OPS_PVC_SIZE}
+ - name: ES_OPS_PVC_PREFIX
+ value: ${ES_OPS_PVC_PREFIX}
+ - name: ES_OPS_PVC_DYNAMIC
+ value: ${ES_OPS_PVC_DYNAMIC}
+ - name: ES_OPS_CLUSTER_SIZE
+ value: ${ES_OPS_CLUSTER_SIZE}
+ - name: ES_OPS_NODE_QUORUM
+ value: ${ES_OPS_NODE_QUORUM}
+ - name: ES_OPS_RECOVER_AFTER_NODES
+ value: ${ES_OPS_RECOVER_AFTER_NODES}
+ - name: ES_OPS_RECOVER_EXPECTED_NODES
+ value: ${ES_OPS_RECOVER_EXPECTED_NODES}
+ - name: ES_OPS_RECOVER_AFTER_TIME
+ value: ${ES_OPS_RECOVER_AFTER_TIME}
+ - name: FLUENTD_NODESELECTOR
+ value: ${FLUENTD_NODESELECTOR}
+ - name: ES_NODESELECTOR
+ value: ${ES_NODESELECTOR}
+ - name: ES_OPS_NODESELECTOR
+ value: ${ES_OPS_NODESELECTOR}
+ - name: KIBANA_NODESELECTOR
+ value: ${KIBANA_NODESELECTOR}
+ - name: KIBANA_OPS_NODESELECTOR
+ value: ${KIBANA_OPS_NODESELECTOR}
+ - name: CURATOR_NODESELECTOR
+ value: ${CURATOR_NODESELECTOR}
+ - name: CURATOR_OPS_NODESELECTOR
+ value: ${CURATOR_OPS_NODESELECTOR}
+ - name: MODE
+ value: ${MODE}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: logging-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ parameters:
+ -
+ description: "The mode that the deployer runs in."
+ name: MODE
+ value: "install"
+ -
+ description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"'
+ name: IMAGE_PREFIX
+ value: "registry.access.redhat.com/openshift3/"
+ -
+ description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"'
+ name: IMAGE_VERSION
+ value: "3.4.0"
+ -
+ description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
+ name: IMAGE_PULL_SECRET
+ -
+ description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
+ name: INSECURE_REGISTRY
+ value: "false"
+ -
+ description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
+ name: ENABLE_OPS_CLUSTER
+ value: "false"
+ -
+ description: "(Deprecated) External hostname where clients will reach kibana"
+ name: KIBANA_HOSTNAME
+ value: "kibana.example.com"
+ -
+ description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
+ name: KIBANA_OPS_HOSTNAME
+ value: kibana-ops.example.com
+ -
+ description: "(Deprecated) External URL for the master, for OAuth purposes"
+ name: PUBLIC_MASTER_URL
+ value: "https://localhost:8443"
+ -
+ description: "(Deprecated) Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc.cluster.local"
+ -
+ description: "(Deprecated) How many instances of ElasticSearch to deploy."
+ name: ES_CLUSTER_SIZE
+ value: "1"
+ -
+ description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
+ name: ES_INSTANCE_RAM
+ value: "8G"
+ -
+ description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
+ name: ES_PVC_SIZE
+ -
+ description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
+ name: ES_PVC_PREFIX
+ value: "logging-es-"
+ -
+ description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. '
+ name: ES_PVC_DYNAMIC
+ -
+ description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_NODE_QUORUM
+ -
+ description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
+ name: ES_RECOVER_AFTER_NODES
+ -
+ description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
+ name: ES_RECOVER_EXPECTED_NODES
+ -
+ description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
+ name: ES_RECOVER_AFTER_TIME
+ value: "5m"
+ -
+ description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
+ name: ES_OPS_CLUSTER_SIZE
+ -
+ description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
+ name: ES_OPS_INSTANCE_RAM
+ value: "8G"
+ -
+ description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
+ name: ES_OPS_PVC_SIZE
+ -
+ description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
+ name: ES_OPS_PVC_PREFIX
+ value: "logging-es-ops-"
+ -
+ description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. '
+ name: ES_OPS_PVC_DYNAMIC
+ -
+ description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_OPS_NODE_QUORUM
+ -
+ description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_AFTER_NODES
+ -
+ description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_EXPECTED_NODES
+ -
+ description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
+ name: ES_OPS_RECOVER_AFTER_TIME
+ value: "5m"
+ -
+ description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
+ name: FLUENTD_NODESELECTOR
+ value: "logging-infra-fluentd=true"
+ -
+ description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
+ name: ES_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
+ name: ES_OPS_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector Kibana cluster (label=value)."
+ name: KIBANA_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
+ name: KIBANA_OPS_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector Curator (label=value)."
+ name: CURATOR_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector operations Curator (label=value)."
+ name: CURATOR_OPS_NODESELECTOR
+ value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml
new file mode 100644
index 000000000..c4ab794ae
--- /dev/null
+++ b/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml
@@ -0,0 +1,168 @@
+#!/bin/bash
+#
+# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: metrics-deployer-template
+ annotations:
+ description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
+ tags: "infrastructure"
+labels:
+ metrics-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: metrics-deployer-
+ spec:
+ securityContext: {}
+ containers:
+ - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
+ name: deployer
+ securityContext: {}
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: MODE
+ value: ${MODE}
+ - name: CONTINUE_ON_ERROR
+ value: ${CONTINUE_ON_ERROR}
+ - name: REDEPLOY
+ value: ${REDEPLOY}
+ - name: IGNORE_PREFLIGHT
+ value: ${IGNORE_PREFLIGHT}
+ - name: USE_PERSISTENT_STORAGE
+ value: ${USE_PERSISTENT_STORAGE}
+ - name: DYNAMICALLY_PROVISION_STORAGE
+ value: ${DYNAMICALLY_PROVISION_STORAGE}
+ - name: HAWKULAR_METRICS_HOSTNAME
+ value: ${HAWKULAR_METRICS_HOSTNAME}
+ - name: CASSANDRA_NODES
+ value: ${CASSANDRA_NODES}
+ - name: CASSANDRA_PV_SIZE
+ value: ${CASSANDRA_PV_SIZE}
+ - name: METRIC_DURATION
+ value: ${METRIC_DURATION}
+ - name: USER_WRITE_ACCESS
+ value: ${USER_WRITE_ACCESS}
+ - name: HEAPSTER_NODE_ID
+ value: ${HEAPSTER_NODE_ID}
+ - name: METRIC_RESOLUTION
+ value: ${METRIC_RESOLUTION}
+ - name: STARTUP_TIMEOUT
+ value: ${STARTUP_TIMEOUT}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: metrics-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: metrics-deployer
+parameters:
+-
+ description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "registry.access.redhat.com/openshift3/"
+-
+ description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
+ name: IMAGE_VERSION
+ value: "v3.5"
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc:443"
+-
+ description: "External hostname where clients will reach Hawkular Metrics"
+ name: HAWKULAR_METRICS_HOSTNAME
+ required: true
+-
+ description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
+ name: MODE
+ value: "deploy"
+-
+ description: "Set to true to continue even if the deployer runs into an error."
+ name: CONTINUE_ON_ERROR
+ value: "false"
+-
+ description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
+ name: REDEPLOY
+ value: "false"
+-
+ description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
+ name: IGNORE_PREFLIGHT
+ value: "false"
+-
+ description: "Set to true for persistent storage, set to false to use non persistent storage"
+ name: USE_PERSISTENT_STORAGE
+ value: "true"
+-
+ description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes"
+ name: DYNAMICALLY_PROVISION_STORAGE
+ value: "false"
+-
+ description: "The number of Cassandra Nodes to deploy for the initial cluster"
+ name: CASSANDRA_NODES
+ value: "1"
+-
+ description: "The persistent volume size for each of the Cassandra nodes"
+ name: CASSANDRA_PV_SIZE
+ value: "10Gi"
+-
+ description: "How many days metrics should be stored for."
+ name: METRIC_DURATION
+ value: "7"
+-
+ description: "If a user accounts should be allowed to write metrics."
+ name: USER_WRITE_ACCESS
+ value: "false"
+-
+ description: "The identifier used when generating metric ids in Hawkular"
+ name: HEAPSTER_NODE_ID
+ value: "nodename"
+-
+ description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds"
+ name: METRIC_RESOLUTION
+ value: "30s"
+-
+ description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart"
+ name: STARTUP_TIMEOUT
+ value: "500"
diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.6/enterprise/registry-console.yaml
new file mode 100644
index 000000000..28feac4e6
--- /dev/null
+++ b/roles/openshift_hosted_templates/files/v1.6/enterprise/registry-console.yaml
@@ -0,0 +1,124 @@
+kind: Template
+apiVersion: v1
+metadata:
+ name: "registry-console"
+ annotations:
+ description: "Template for deploying registry web console. Requires cluster-admin."
+ tags: infrastructure
+labels:
+ createdBy: "registry-console-template"
+objects:
+ - kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: "registry-console"
+ labels:
+ name: "registry-console"
+ spec:
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "registry-console"
+ template:
+ metadata:
+ labels:
+ name: "registry-console"
+ spec:
+ containers:
+ - name: registry-console
+ image: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
+ ports:
+ - containerPort: 9090
+ protocol: TCP
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /ping
+ port: 9090
+ scheme: HTTP
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /ping
+ port: 9090
+ scheme: HTTP
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ env:
+ - name: OPENSHIFT_OAUTH_PROVIDER_URL
+ value: "${OPENSHIFT_OAUTH_PROVIDER_URL}"
+ - name: OPENSHIFT_OAUTH_CLIENT_ID
+ value: "${OPENSHIFT_OAUTH_CLIENT_ID}"
+ - name: KUBERNETES_INSECURE
+ value: "false"
+ - name: COCKPIT_KUBE_INSECURE
+ value: "false"
+ - name: REGISTRY_ONLY
+ value: "true"
+ - name: REGISTRY_HOST
+ value: "${REGISTRY_HOST}"
+ - kind: Service
+ apiVersion: v1
+ metadata:
+ name: "registry-console"
+ labels:
+ name: "registry-console"
+ spec:
+ type: ClusterIP
+ ports:
+ - name: registry-console
+ protocol: TCP
+ port: 9000
+ targetPort: 9090
+ selector:
+ name: "registry-console"
+ - kind: ImageStream
+ apiVersion: v1
+ metadata:
+ name: registry-console
+ annotations:
+ description: Atomic Registry console
+ spec:
+ tags:
+ - annotations: null
+ from:
+ kind: DockerImage
+ name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_VERSION}
+ - kind: OAuthClient
+ apiVersion: v1
+ metadata:
+ name: "${OPENSHIFT_OAUTH_CLIENT_ID}"
+ respondWithChallenges: false
+ secret: "${OPENSHIFT_OAUTH_CLIENT_SECRET}"
+ redirectURIs:
+ - "${COCKPIT_KUBE_URL}"
+parameters:
+ - description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
+ name: IMAGE_PREFIX
+ value: "registry.access.redhat.com/openshift3/"
+ - description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:3.5", set version "3.5"'
+ name: IMAGE_VERSION
+ value: "3.5"
+ - description: "The public URL for the Openshift OAuth Provider, e.g. https://openshift.example.com:8443"
+ name: OPENSHIFT_OAUTH_PROVIDER_URL
+ required: true
+ - description: "The registry console URL. This should be created beforehand using 'oc create route passthrough --service registry-console --port registry-console -n default', e.g. https://registry-console-default.example.com"
+ name: COCKPIT_KUBE_URL
+ required: true
+ - description: "Oauth client secret"
+ name: OPENSHIFT_OAUTH_CLIENT_SECRET
+ from: "user[a-zA-Z0-9]{64}"
+ generate: expression
+ - description: "Oauth client id"
+ name: OPENSHIFT_OAUTH_CLIENT_ID
+ value: "cockpit-oauth-client"
+ - description: "The integrated registry hostname exposed via route, e.g. registry.example.com"
+ name: REGISTRY_HOST
+ required: true
diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml
new file mode 100644
index 000000000..5b5503500
--- /dev/null
+++ b/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml
@@ -0,0 +1,342 @@
+apiVersion: "v1"
+kind: "List"
+items:
+-
+ apiVersion: "v1"
+ kind: "Template"
+ metadata:
+ name: logging-deployer-account-template
+ annotations:
+ description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
+ tags: "infrastructure"
+ objects:
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ name: logging-deployer
+ metadata:
+ name: logging-deployer
+ labels:
+ logging-infra: deployer
+ provider: openshift
+ component: deployer
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-kibana
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-elasticsearch
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-fluentd
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-curator
+ - apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: oauth-editor
+ rules:
+ - resources:
+ - oauthclients
+ verbs:
+ - create
+ - delete
+ - apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: daemonset-admin
+ rules:
+ - resources:
+ - daemonsets
+ apiGroups:
+ - extensions
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - delete
+ - update
+ - apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: rolebinding-reader
+ rules:
+ - resources:
+ - clusterrolebindings
+ verbs:
+ - get
+ -
+ apiVersion: v1
+ kind: RoleBinding
+ metadata:
+ name: logging-deployer-edit-role
+ roleRef:
+ name: edit
+ subjects:
+ - kind: ServiceAccount
+ name: logging-deployer
+ -
+ apiVersion: v1
+ kind: RoleBinding
+ metadata:
+ name: logging-deployer-dsadmin-role
+ roleRef:
+ name: daemonset-admin
+ subjects:
+ - kind: ServiceAccount
+ name: logging-deployer
+ -
+ apiVersion: v1
+ kind: RoleBinding
+ metadata:
+ name: logging-elasticsearch-view-role
+ roleRef:
+ name: view
+ subjects:
+ - kind: ServiceAccount
+ name: aggregated-logging-elasticsearch
+-
+ apiVersion: "v1"
+ kind: "Template"
+ metadata:
+ name: logging-deployer-template
+ annotations:
+ description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
+ tags: "infrastructure"
+ labels:
+ logging-infra: deployer
+ provider: openshift
+ objects:
+ -
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: logging-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
+ imagePullPolicy: Always
+ name: deployer
+ volumeMounts:
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: IMAGE_PULL_SECRET
+ value: ${IMAGE_PULL_SECRET}
+ - name: INSECURE_REGISTRY
+ value: ${INSECURE_REGISTRY}
+ - name: ENABLE_OPS_CLUSTER
+ value: ${ENABLE_OPS_CLUSTER}
+ - name: KIBANA_HOSTNAME
+ value: ${KIBANA_HOSTNAME}
+ - name: KIBANA_OPS_HOSTNAME
+ value: ${KIBANA_OPS_HOSTNAME}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: ES_INSTANCE_RAM
+ value: ${ES_INSTANCE_RAM}
+ - name: ES_PVC_SIZE
+ value: ${ES_PVC_SIZE}
+ - name: ES_PVC_PREFIX
+ value: ${ES_PVC_PREFIX}
+ - name: ES_PVC_DYNAMIC
+ value: ${ES_PVC_DYNAMIC}
+ - name: ES_CLUSTER_SIZE
+ value: ${ES_CLUSTER_SIZE}
+ - name: ES_NODE_QUORUM
+ value: ${ES_NODE_QUORUM}
+ - name: ES_RECOVER_AFTER_NODES
+ value: ${ES_RECOVER_AFTER_NODES}
+ - name: ES_RECOVER_EXPECTED_NODES
+ value: ${ES_RECOVER_EXPECTED_NODES}
+ - name: ES_RECOVER_AFTER_TIME
+ value: ${ES_RECOVER_AFTER_TIME}
+ - name: ES_OPS_INSTANCE_RAM
+ value: ${ES_OPS_INSTANCE_RAM}
+ - name: ES_OPS_PVC_SIZE
+ value: ${ES_OPS_PVC_SIZE}
+ - name: ES_OPS_PVC_PREFIX
+ value: ${ES_OPS_PVC_PREFIX}
+ - name: ES_OPS_PVC_DYNAMIC
+ value: ${ES_OPS_PVC_DYNAMIC}
+ - name: ES_OPS_CLUSTER_SIZE
+ value: ${ES_OPS_CLUSTER_SIZE}
+ - name: ES_OPS_NODE_QUORUM
+ value: ${ES_OPS_NODE_QUORUM}
+ - name: ES_OPS_RECOVER_AFTER_NODES
+ value: ${ES_OPS_RECOVER_AFTER_NODES}
+ - name: ES_OPS_RECOVER_EXPECTED_NODES
+ value: ${ES_OPS_RECOVER_EXPECTED_NODES}
+ - name: ES_OPS_RECOVER_AFTER_TIME
+ value: ${ES_OPS_RECOVER_AFTER_TIME}
+ - name: FLUENTD_NODESELECTOR
+ value: ${FLUENTD_NODESELECTOR}
+ - name: ES_NODESELECTOR
+ value: ${ES_NODESELECTOR}
+ - name: ES_OPS_NODESELECTOR
+ value: ${ES_OPS_NODESELECTOR}
+ - name: KIBANA_NODESELECTOR
+ value: ${KIBANA_NODESELECTOR}
+ - name: KIBANA_OPS_NODESELECTOR
+ value: ${KIBANA_OPS_NODESELECTOR}
+ - name: CURATOR_NODESELECTOR
+ value: ${CURATOR_NODESELECTOR}
+ - name: CURATOR_OPS_NODESELECTOR
+ value: ${CURATOR_OPS_NODESELECTOR}
+ - name: MODE
+ value: ${MODE}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: logging-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ parameters:
+ -
+ description: "The mode that the deployer runs in."
+ name: MODE
+ value: "install"
+ -
+ description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "docker.io/openshift/origin-"
+ -
+ description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "latest"
+ -
+ description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
+ name: IMAGE_PULL_SECRET
+ -
+ description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
+ name: INSECURE_REGISTRY
+ value: "false"
+ -
+ description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
+ name: ENABLE_OPS_CLUSTER
+ value: "false"
+ -
+ description: "(Deprecated) External hostname where clients will reach kibana"
+ name: KIBANA_HOSTNAME
+ value: "kibana.example.com"
+ -
+ description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
+ name: KIBANA_OPS_HOSTNAME
+ value: kibana-ops.example.com
+ -
+ description: "(Deprecated) External URL for the master, for OAuth purposes"
+ name: PUBLIC_MASTER_URL
+ value: "https://localhost:8443"
+ -
+ description: "(Deprecated) Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc.cluster.local"
+ -
+ description: "(Deprecated) How many instances of ElasticSearch to deploy."
+ name: ES_CLUSTER_SIZE
+ value: "1"
+ -
+ description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
+ name: ES_INSTANCE_RAM
+ value: "8G"
+ -
+ description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
+ name: ES_PVC_SIZE
+ -
+ description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
+ name: ES_PVC_PREFIX
+ value: "logging-es-"
+ -
+ description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. '
+ name: ES_PVC_DYNAMIC
+ -
+ description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_NODE_QUORUM
+ -
+ description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
+ name: ES_RECOVER_AFTER_NODES
+ -
+ description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
+ name: ES_RECOVER_EXPECTED_NODES
+ -
+ description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
+ name: ES_RECOVER_AFTER_TIME
+ value: "5m"
+ -
+ description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
+ name: ES_OPS_CLUSTER_SIZE
+ -
+ description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
+ name: ES_OPS_INSTANCE_RAM
+ value: "8G"
+ -
+ description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
+ name: ES_OPS_PVC_SIZE
+ -
+ description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
+ name: ES_OPS_PVC_PREFIX
+ value: "logging-es-ops-"
+ -
+ description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. '
+ name: ES_OPS_PVC_DYNAMIC
+ -
+ description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_OPS_NODE_QUORUM
+ -
+ description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_AFTER_NODES
+ -
+ description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_EXPECTED_NODES
+ -
+ description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
+ name: ES_OPS_RECOVER_AFTER_TIME
+ value: "5m"
+ -
+ description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
+ name: FLUENTD_NODESELECTOR
+ value: "logging-infra-fluentd=true"
+ -
+ description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
+ name: ES_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
+ name: ES_OPS_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector Kibana cluster (label=value)."
+ name: KIBANA_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
+ name: KIBANA_OPS_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector Curator (label=value)."
+ name: CURATOR_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector operations Curator (label=value)."
+ name: CURATOR_OPS_NODESELECTOR
+ value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml
new file mode 100644
index 000000000..d191c0439
--- /dev/null
+++ b/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml
@@ -0,0 +1,168 @@
+#!/bin/bash
+#
+# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: metrics-deployer-template
+ annotations:
+ description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
+ tags: "infrastructure"
+labels:
+ metrics-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: metrics-deployer-
+ spec:
+ securityContext: {}
+ containers:
+ - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
+ name: deployer
+ securityContext: {}
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: MODE
+ value: ${MODE}
+ - name: CONTINUE_ON_ERROR
+ value: ${CONTINUE_ON_ERROR}
+ - name: REDEPLOY
+ value: ${REDEPLOY}
+ - name: IGNORE_PREFLIGHT
+ value: ${IGNORE_PREFLIGHT}
+ - name: USE_PERSISTENT_STORAGE
+ value: ${USE_PERSISTENT_STORAGE}
+ - name: DYNAMICALLY_PROVISION_STORAGE
+ value: ${DYNAMICALLY_PROVISION_STORAGE}
+ - name: HAWKULAR_METRICS_HOSTNAME
+ value: ${HAWKULAR_METRICS_HOSTNAME}
+ - name: CASSANDRA_NODES
+ value: ${CASSANDRA_NODES}
+ - name: CASSANDRA_PV_SIZE
+ value: ${CASSANDRA_PV_SIZE}
+ - name: METRIC_DURATION
+ value: ${METRIC_DURATION}
+ - name: USER_WRITE_ACCESS
+ value: ${USER_WRITE_ACCESS}
+ - name: HEAPSTER_NODE_ID
+ value: ${HEAPSTER_NODE_ID}
+ - name: METRIC_RESOLUTION
+ value: ${METRIC_RESOLUTION}
+ - name: STARTUP_TIMEOUT
+ value: ${STARTUP_TIMEOUT}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: metrics-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: metrics-deployer
+parameters:
+-
+ description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "openshift/origin-"
+-
+ description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
+ name: IMAGE_VERSION
+ value: "latest"
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc:443"
+-
+ description: "External hostname where clients will reach Hawkular Metrics"
+ name: HAWKULAR_METRICS_HOSTNAME
+ required: true
+-
+ description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
+ name: MODE
+ value: "deploy"
+-
+ description: "Set to true to continue even if the deployer runs into an error."
+ name: CONTINUE_ON_ERROR
+ value: "false"
+-
+ description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
+ name: REDEPLOY
+ value: "false"
+-
+ description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
+ name: IGNORE_PREFLIGHT
+ value: "false"
+-
+ description: "Set to true for persistent storage, set to false to use non persistent storage"
+ name: USE_PERSISTENT_STORAGE
+ value: "true"
+-
+ description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes"
+ name: DYNAMICALLY_PROVISION_STORAGE
+ value: "false"
+-
+ description: "The number of Cassandra Nodes to deploy for the initial cluster"
+ name: CASSANDRA_NODES
+ value: "1"
+-
+ description: "The persistent volume size for each of the Cassandra nodes"
+ name: CASSANDRA_PV_SIZE
+ value: "10Gi"
+-
+ description: "How many days metrics should be stored for."
+ name: METRIC_DURATION
+ value: "7"
+-
+ description: "If a user accounts should be allowed to write metrics."
+ name: USER_WRITE_ACCESS
+ value: "false"
+-
+ description: "The identifier used when generating metric ids in Hawkular"
+ name: HEAPSTER_NODE_ID
+ value: "nodename"
+-
+ description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds"
+ name: METRIC_RESOLUTION
+ value: "30s"
+-
+ description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart"
+ name: STARTUP_TIMEOUT
+ value: "500"
diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.6/origin/registry-console.yaml
new file mode 100644
index 000000000..80cc4233b
--- /dev/null
+++ b/roles/openshift_hosted_templates/files/v1.6/origin/registry-console.yaml
@@ -0,0 +1,124 @@
+kind: Template
+apiVersion: v1
+metadata:
+ name: "registry-console"
+ annotations:
+ description: "Template for deploying registry web console. Requires cluster-admin."
+ tags: infrastructure
+labels:
+ createdBy: "registry-console-template"
+objects:
+ - kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: "registry-console"
+ labels:
+ name: "registry-console"
+ spec:
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "registry-console"
+ template:
+ metadata:
+ labels:
+ name: "registry-console"
+ spec:
+ containers:
+ - name: registry-console
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ ports:
+ - containerPort: 9090
+ protocol: TCP
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /ping
+ port: 9090
+ scheme: HTTP
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /ping
+ port: 9090
+ scheme: HTTP
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ env:
+ - name: OPENSHIFT_OAUTH_PROVIDER_URL
+ value: "${OPENSHIFT_OAUTH_PROVIDER_URL}"
+ - name: OPENSHIFT_OAUTH_CLIENT_ID
+ value: "${OPENSHIFT_OAUTH_CLIENT_ID}"
+ - name: KUBERNETES_INSECURE
+ value: "false"
+ - name: COCKPIT_KUBE_INSECURE
+ value: "false"
+ - name: REGISTRY_ONLY
+ value: "true"
+ - name: REGISTRY_HOST
+ value: "${REGISTRY_HOST}"
+ - kind: Service
+ apiVersion: v1
+ metadata:
+ name: "registry-console"
+ labels:
+ name: "registry-console"
+ spec:
+ type: ClusterIP
+ ports:
+ - name: registry-console
+ protocol: TCP
+ port: 9000
+ targetPort: 9090
+ selector:
+ name: "registry-console"
+ - kind: ImageStream
+ apiVersion: v1
+ metadata:
+ name: registry-console
+ annotations:
+ description: Atomic Registry console
+ spec:
+ tags:
+ - annotations: null
+ from:
+ kind: DockerImage
+ name: ${IMAGE_NAME}
+ name: ${IMAGE_VERSION}
+ - kind: OAuthClient
+ apiVersion: v1
+ metadata:
+ name: "${OPENSHIFT_OAUTH_CLIENT_ID}"
+ respondWithChallenges: false
+ secret: "${OPENSHIFT_OAUTH_CLIENT_SECRET}"
+ redirectURIs:
+ - "${COCKPIT_KUBE_URL}"
+parameters:
+ - description: "Container image name"
+ name: IMAGE_NAME
+ value: "cockpit/kubernetes"
+ - description: 'Specify image version; e.g. for "cockpit/kubernetes:latest", set version "latest"'
+ name: IMAGE_VERSION
+ value: latest
+ - description: "The public URL for the Openshift OAuth Provider, e.g. https://openshift.example.com:8443"
+ name: OPENSHIFT_OAUTH_PROVIDER_URL
+ required: true
+ - description: "The registry console URL. This should be created beforehand using 'oc create route passthrough --service registry-console --port registry-console -n default', e.g. https://registry-console-default.example.com"
+ name: COCKPIT_KUBE_URL
+ required: true
+ - description: "Oauth client secret"
+ name: OPENSHIFT_OAUTH_CLIENT_SECRET
+ from: "user[a-zA-Z0-9]{64}"
+ generate: expression
+ - description: "Oauth client id"
+ name: OPENSHIFT_OAUTH_CLIENT_ID
+ value: "cockpit-oauth-client"
+ - description: "The integrated registry hostname exposed via route, e.g. registry.example.com"
+ name: REGISTRY_HOST
+ required: true
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 14b80304d..42f4fc72e 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -65,6 +65,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log
- `openshift_logging_es_cluster_size`: The number of ES cluster members. Defaults to '1'.
- `openshift_logging_es_cpu_limit`: The amount of CPU limit for the ES cluster. Unused if not set
- `openshift_logging_es_memory_limit`: The amount of RAM that should be assigned to ES. Defaults to '8Gi'.
+- `openshift_logging_es_log_appenders`: The list of rootLogger appenders for ES logs which can be: 'file', 'console'. Defaults to 'file'.
- `openshift_logging_es_pv_selector`: A key/value map added to a PVC in order to select specific PVs. Defaults to 'None'.
- `openshift_logging_es_pvc_dynamic`: Whether or not to add the dynamic PVC annotation for any generated PVCs. Defaults to 'False'.
- `openshift_logging_es_pvc_size`: The requested size for the ES PVCs, when not provided the role will not generate any PVCs. Defaults to '""'.
@@ -72,6 +73,8 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log
- `openshift_logging_es_recover_after_time`: The amount of time ES will wait before it tries to recover. Defaults to '5m'.
- `openshift_logging_es_storage_group`: The storage group used for ES. Defaults to '65534'.
- `openshift_logging_es_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"} to select the nodes where the pod will land.
+- `openshift_logging_es_number_of_shards`: The number of primary shards for every new index created in ES. Defaults to '1'.
+- `openshift_logging_es_number_of_replicas`: The number of replica shards per primary shard for every new index. Defaults to '0'.
When `openshift_logging_use_ops` is `True`, there are some additional vars. These work the
same as above for their non-ops counterparts, but apply to the OPS cluster instance:
@@ -88,6 +91,8 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta
- `openshift_logging_es_ops_pvc_prefix`: logging-es-ops
- `openshift_logging_es_ops_recover_after_time`: 5m
- `openshift_logging_es_ops_storage_group`: 65534
+- `openshift_logging_es_ops_number_of_shards`: The number of primary shards for every new index created in ES. Defaults to '1'.
+- `openshift_logging_es_ops_number_of_replicas`: The number of replica shards per primary shard for every new index. Defaults to '0'.
- `openshift_logging_kibana_ops_hostname`: The Operations Kibana hostname. Defaults to 'kibana-ops.example.com'.
- `openshift_logging_kibana_ops_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.
- `openshift_logging_kibana_ops_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified.
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index ad9c1ce42..96ed44011 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -1,9 +1,7 @@
---
-openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
-openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
openshift_logging_use_ops: "{{ openshift_hosted_logging_enable_ops_cluster | default('false') | bool }}"
openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
-openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' + openshift.master.api_port) }}"
+openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}"
openshift_logging_namespace: logging
openshift_logging_install_logging: True
openshift_logging_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
@@ -22,7 +20,7 @@ openshift_logging_curator_ops_cpu_limit: 100m
openshift_logging_curator_ops_memory_limit: null
openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}"
-openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' + openshift.common.dns_domain) }}"
+openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
openshift_logging_kibana_cpu_limit: null
openshift_logging_kibana_memory_limit: null
openshift_logging_kibana_proxy_debug: false
@@ -46,7 +44,7 @@ openshift_logging_kibana_key: ""
#for the public facing kibana certs
openshift_logging_kibana_ca: ""
-openshift_logging_kibana_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' + openshift.common.dns_domain) }}"
+openshift_logging_kibana_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
openshift_logging_kibana_ops_cpu_limit: null
openshift_logging_kibana_ops_memory_limit: null
openshift_logging_kibana_ops_proxy_debug: false
@@ -54,6 +52,18 @@ openshift_logging_kibana_ops_proxy_cpu_limit: null
openshift_logging_kibana_ops_proxy_memory_limit: null
openshift_logging_kibana_ops_replica_count: 1
+#The absolute path on the control node to the cert file to use
+#for the public facing ops kibana certs
+openshift_logging_kibana_ops_cert: ""
+
+#The absolute path on the control node to the key file to use
+#for the public facing ops kibana certs
+openshift_logging_kibana_ops_key: ""
+
+#The absolute path on the control node to the CA file to use
+#for the public facing ops kibana certs
+openshift_logging_kibana_ops_ca: ""
+
openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
openshift_logging_fluentd_cpu_limit: 100m
openshift_logging_fluentd_memory_limit: 512Mi
@@ -70,6 +80,8 @@ openshift_logging_es_client_cert: /etc/fluent/keys/cert
openshift_logging_es_client_key: /etc/fluent/keys/key
openshift_logging_es_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
openshift_logging_es_cpu_limit: null
+# the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console'
+openshift_logging_es_log_appenders: ['file']
openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}"
openshift_logging_es_pv_selector: null
openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}"
@@ -78,6 +90,10 @@ openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_
openshift_logging_es_recover_after_time: 5m
openshift_logging_es_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"
openshift_logging_es_nodeselector: "{{ openshift_hosted_logging_elasticsearch_nodeselector | default('') | map_from_pairs }}"
+# openshift_logging_es_config is a hash to be merged into the defaults for the elasticsearch.yaml
+openshift_logging_es_config: {}
+openshift_logging_es_number_of_shards: 1
+openshift_logging_es_number_of_replicas: 0
# allow cluster-admin or cluster-reader to view operations index
openshift_logging_es_ops_allow_cluster_reader: False
@@ -97,9 +113,11 @@ openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_
openshift_logging_es_ops_recover_after_time: 5m
openshift_logging_es_ops_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"
openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}"
+openshift_logging_es_ops_number_of_shards: 1
+openshift_logging_es_ops_number_of_replicas: 0
# storage related defaults
-openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default('ReadWriteOnce') }}"
+openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}"
# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly
diff --git a/roles/openshift_logging/files/generate-jks.sh b/roles/openshift_logging/files/generate-jks.sh
index 9fe557f83..b5ba7f9d1 100644
--- a/roles/openshift_logging/files/generate-jks.sh
+++ b/roles/openshift_logging/files/generate-jks.sh
@@ -1,4 +1,4 @@
-#! /bin/sh
+#! /bin/bash
set -ex
function usage() {
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
index 9beffaef7..44b0b2d48 100644
--- a/roles/openshift_logging/filter_plugins/openshift_logging.py
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -5,6 +5,18 @@
import random
+def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'):
+ '''Return a hash with the desired storage for the given ES instance'''
+ deploy_config = os_logging_facts[root]['deploymentconfigs'].get(dc_name)
+ if deploy_config:
+ storage = deploy_config['volumes']['elasticsearch-storage']
+ if storage.get('hostPath'):
+ return dict(kind='hostpath', path=storage.get('hostPath').get('path'))
+ if len(pvc_claim.strip()) > 0:
+ return dict(kind='pvc', pvc_claim=pvc_claim)
+ return dict(kind='emptydir')
+
+
def random_word(source_alpha, length):
''' Returns a random word given the source of characters to pick from and resulting length '''
return ''.join(random.choice(source_alpha) for i in range(length))
@@ -44,4 +56,5 @@ class FilterModule(object):
'random_word': random_word,
'entry_from_named_pair': entry_from_named_pair,
'map_from_pairs': map_from_pairs,
+ 'es_storage': es_storage
}
diff --git a/roles/openshift_logging/meta/main.yaml b/roles/openshift_logging/meta/main.yaml
index ab57242c8..9c480f73a 100644
--- a/roles/openshift_logging/meta/main.yaml
+++ b/roles/openshift_logging/meta/main.yaml
@@ -13,5 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
-- role: openshift_master_facts
- role: openshift_facts
diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml
index 8fcf517ad..253543f54 100644
--- a/roles/openshift_logging/tasks/generate_configmaps.yaml
+++ b/roles/openshift_logging/tasks/generate_configmaps.yaml
@@ -1,20 +1,39 @@
---
- block:
- - copy:
- src: elasticsearch-logging.yml
+ - fail:
+ msg: "The openshift_logging_es_log_appenders '{{openshift_logging_es_log_appenders}}' has an unrecognized option and only supports the following as a list: {{es_log_appenders | join(', ')}}"
+ when:
+ - es_logging_contents is undefined
+ - "{{ openshift_logging_es_log_appenders | list | difference(es_log_appenders) | length != 0 }}"
+ changed_when: no
+
+ - template:
+ src: elasticsearch-logging.yml.j2
dest: "{{mktemp.stdout}}/elasticsearch-logging.yml"
+ vars:
+ root_logger: "{{openshift_logging_es_log_appenders | join(', ')}}"
when: es_logging_contents is undefined
changed_when: no
+ check_mode: no
- - template:
- src: elasticsearch.yml.j2
- dest: "{{mktemp.stdout}}/elasticsearch.yml"
+ - local_action: >
+ template src=elasticsearch.yml.j2
+ dest="{{local_tmp.stdout}}/elasticsearch-gen-template.yml"
vars:
- allow_cluster_reader: "{{openshift_logging_es_ops_allow_cluster_reader | lower | default('false')}}"
when: es_config_contents is undefined
changed_when: no
- copy:
+ content: "{{ config_source | combine(override_config,recursive=True) | to_nice_yaml }}"
+ dest: "{{mktemp.stdout}}/elasticsearch.yml"
+ vars:
+ config_source: "{{lookup('file','{{local_tmp.stdout}}/elasticsearch-gen-template.yml') | from_yaml }}"
+ override_config: "{{openshift_logging_es_config | from_yaml}}"
+ when: es_logging_contents is undefined
+ changed_when: no
+
+ - copy:
content: "{{es_logging_contents}}"
dest: "{{mktemp.stdout}}/elasticsearch-logging.yml"
when: es_logging_contents is defined
diff --git a/roles/openshift_logging/tasks/generate_jks.yaml b/roles/openshift_logging/tasks/generate_jks.yaml
index c6e2ccbc0..6e3204589 100644
--- a/roles/openshift_logging/tasks/generate_jks.yaml
+++ b/roles/openshift_logging/tasks/generate_jks.yaml
@@ -20,12 +20,6 @@
register: truststore_jks
check_mode: no
-- name: Create temp directory for doing work in
- local_action: command mktemp -d /tmp/openshift-logging-ansible-XXXXXX
- register: local_tmp
- changed_when: False
- check_mode: no
-
- name: Create placeholder for previously created JKS certs to prevent recreating...
local_action: file path="{{local_tmp.stdout}}/elasticsearch.jks" state=touch mode="u=rw,g=r,o=r"
when: elasticsearch_jks.stat.exists
@@ -92,7 +86,3 @@
src: "{{local_tmp.stdout}}/truststore.jks"
dest: "{{generated_certs_dir}}/truststore.jks"
when: not truststore_jks.stat.exists
-
-- name: Cleaning up temp dir
- local_action: file path="{{local_tmp.stdout}}" state=absent
- changed_when: False
diff --git a/roles/openshift_logging/tasks/generate_pems.yaml b/roles/openshift_logging/tasks/generate_pems.yaml
index 289b72ea6..e8cececfb 100644
--- a/roles/openshift_logging/tasks/generate_pems.yaml
+++ b/roles/openshift_logging/tasks/generate_pems.yaml
@@ -15,6 +15,7 @@
-subj "/CN={{component}}/OU=OpenShift/O=Logging/subjectAltName=DNS.1=localhost{{cert_ext.stdout}}" -days 712 -nodes
when:
- not key_file.stat.exists
+ - cert_ext is defined
- cert_ext.stdout is defined
check_mode: no
@@ -24,7 +25,7 @@
-subj "/CN={{component}}/OU=OpenShift/O=Logging" -days 712 -nodes
when:
- not key_file.stat.exists
- - cert_ext.stdout is undefined
+ - cert_ext is undefined or cert_ext is defined and cert_ext.stdout is undefined
check_mode: no
- name: Sign cert request with CA for {{component}}
diff --git a/roles/openshift_logging/tasks/generate_pvcs.yaml b/roles/openshift_logging/tasks/generate_pvcs.yaml
index e1629908f..fa7a86c27 100644
--- a/roles/openshift_logging/tasks/generate_pvcs.yaml
+++ b/roles/openshift_logging/tasks/generate_pvcs.yaml
@@ -15,8 +15,7 @@
vars:
obj_name: "{{claim_name}}"
size: "{{es_pvc_size}}"
- access_modes:
- - "{{ es_access_modes }}"
+ access_modes: "{{ es_access_modes | list }}"
pv_selector: "{{es_pv_selector}}"
with_items:
- "{{es_pvc_pool | default([])}}"
@@ -35,8 +34,7 @@
annotations:
volume.alpha.kubernetes.io/storage-class: "dynamic"
size: "{{es_pvc_size}}"
- access_modes:
- - "{{ es_access_modes }}"
+ access_modes: "{{ es_access_modes | list }}"
pv_selector: "{{es_pv_selector}}"
with_items:
- "{{es_pvc_pool|default([])}}"
diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml
index 7af17a708..e77da7a24 100644
--- a/roles/openshift_logging/tasks/generate_routes.yaml
+++ b/roles/openshift_logging/tasks/generate_routes.yaml
@@ -16,12 +16,12 @@
changed_when: false
- name: Generating logging routes
- template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-{{route_info.name}}-route.yaml
+ template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-logging-kibana-route.yaml
tags: routes
vars:
- obj_name: "{{route_info.name}}"
- route_host: "{{route_info.host}}"
- service_name: "{{route_info.name}}"
+ obj_name: "logging-kibana"
+ route_host: "{{openshift_logging_kibana_hostname}}"
+ service_name: "logging-kibana"
tls_key: "{{kibana_key | default('') | b64decode}}"
tls_cert: "{{kibana_cert | default('') | b64decode}}"
tls_ca_cert: "{{kibana_ca | b64decode}}"
@@ -31,10 +31,47 @@
component: support
logging-infra: support
provider: openshift
- with_items:
- - {name: logging-kibana, host: "{{openshift_logging_kibana_hostname}}"}
- - {name: logging-kibana-ops, host: "{{openshift_logging_kibana_ops_hostname}}"}
- loop_control:
- loop_var: route_info
- when: (route_info.name == 'logging-kibana-ops' and openshift_logging_use_ops | bool) or route_info.name == 'logging-kibana'
+ changed_when: no
+
+- set_fact: kibana_ops_key={{ lookup('file', openshift_logging_kibana_ops_key) | b64encode }}
+ when:
+ - openshift_logging_use_ops | bool
+ - "{{ openshift_logging_kibana_ops_key | trim | length > 0 }}"
+ changed_when: false
+
+- set_fact: kibana_ops_cert={{ lookup('file', openshift_logging_kibana_ops_cert)| b64encode }}
+ when:
+ - openshift_logging_use_ops | bool
+ - "{{openshift_logging_kibana_ops_cert | trim | length > 0}}"
+ changed_when: false
+
+- set_fact: kibana_ops_ca={{ lookup('file', openshift_logging_kibana_ops_ca)| b64encode }}
+ when:
+ - openshift_logging_use_ops | bool
+ - "{{openshift_logging_kibana_ops_ca | trim | length > 0}}"
+ changed_when: false
+
+- set_fact: kibana_ops_ca={{key_pairs | entry_from_named_pair('ca_file') }}
+ when:
+ - openshift_logging_use_ops | bool
+ - kibana_ops_ca is not defined
+ changed_when: false
+
+- name: Generating logging ops routes
+ template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-logging-kibana-ops-route.yaml
+ tags: routes
+ vars:
+ obj_name: "logging-kibana-ops"
+ route_host: "{{openshift_logging_kibana_ops_hostname}}"
+ service_name: "logging-kibana-ops"
+ tls_key: "{{kibana_ops_key | default('') | b64decode}}"
+ tls_cert: "{{kibana_ops_cert | default('') | b64decode}}"
+ tls_ca_cert: "{{kibana_ops_ca | b64decode}}"
+ tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
+ edge_term_policy: "{{openshift_logging_kibana_edge_term_policy | default('') }}"
+ labels:
+ component: support
+ logging-infra: support
+ provider: openshift
+ when: openshift_logging_use_ops | bool
changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml
index 81fac8b5e..f396bcc6d 100644
--- a/roles/openshift_logging/tasks/generate_secrets.yaml
+++ b/roles/openshift_logging/tasks/generate_secrets.yaml
@@ -31,8 +31,6 @@
- fluentd
loop_control:
loop_var: component
- when: secret_name not in openshift_logging_facts.{{component}}.secrets or
- secret_keys | difference(openshift_logging_facts.{{component}}.secrets["{{secret_name}}"]["keys"]) | length != 0
check_mode: no
changed_when: no
@@ -50,8 +48,6 @@
kibana_key_file: "{{key_pairs | entry_from_named_pair('kibana_internal_key')| b64decode }}"
kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}"
server_tls_file: "{{key_pairs | entry_from_named_pair('server_tls')| b64decode }}"
- when: secret_name not in openshift_logging_facts.kibana.secrets or
- secret_keys | difference(openshift_logging_facts.kibana.secrets["{{secret_name}}"]["keys"]) | length != 0
check_mode: no
changed_when: no
@@ -64,10 +60,8 @@
admin-ca={{generated_certs_dir}}/ca.crt admin.jks={{generated_certs_dir}}/system.admin.jks -o yaml
vars:
secret_name: logging-elasticsearch
- secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key"]
+ secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key", "searchguard.truststore"]
register: logging_es_secret
- when: secret_name not in openshift_logging_facts.elasticsearch.secrets or
- secret_keys | difference(openshift_logging_facts.elasticsearch.secrets["{{secret_name}}"]["keys"]) | length != 0
check_mode: no
changed_when: no
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
index a0ad12d94..28fad420b 100644
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -2,6 +2,13 @@
- name: Getting current ES deployment size
set_fact: openshift_logging_current_es_size={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length }}
+- set_fact: openshift_logging_es_pvc_prefix="logging-es"
+ when: "not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''"
+
+- set_fact: es_pvc_pool={{[]}}
+
+- set_fact: openshift_logging_es_pvc_prefix="{{ openshift_logging_es_pvc_prefix | default('logging-es') }}"
+
- name: Generate PersistentVolumeClaims
include: "{{ role_path}}/tasks/generate_pvcs.yaml"
vars:
@@ -42,10 +49,12 @@
es_cluster_name: "{{component}}"
es_cpu_limit: "{{openshift_logging_es_cpu_limit }}"
es_memory_limit: "{{openshift_logging_es_memory_limit}}"
- volume_names: "{{es_pvc_pool | default([])}}"
- pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}"
+ pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}"
deploy_name: "{{item.1}}"
es_node_selector: "{{openshift_logging_es_nodeselector | default({}) }}"
+ es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim)}}"
+ es_number_of_shards: "{{ openshift_logging_es_number_of_shards }}"
+ es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas }}"
with_indexed_items:
- "{{ es_dc_pool }}"
check_mode: no
@@ -56,6 +65,8 @@
- name: Getting current ES deployment size
set_fact: openshift_logging_current_es_ops_size={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length }}
+- set_fact: openshift_logging_es_ops_pvc_prefix="{{ openshift_logging_es_ops_pvc_prefix | default('logging-es-ops') }}"
+
- name: Validate Elasticsearch cluster size for Ops
fail: msg="The openshift_logging_es_ops_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed"
vars:
@@ -66,6 +77,9 @@
- "{{es_dcs | length - openshift_logging_es_ops_cluster_size|int | abs > 1}}"
check_mode: no
+- set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops"
+ when: "not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''"
+
- set_fact: es_pvc_pool={{[]}}
- name: Generate PersistentVolumeClaims for Ops
@@ -111,8 +125,7 @@
logging_component: elasticsearch
deploy_name_prefix: "logging-{{component}}"
image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- volume_names: "{{es_pvc_pool | default([])}}"
- pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}"
+ pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}"
deploy_name: "{{item.1}}"
es_cluster_name: "{{component}}"
es_cpu_limit: "{{openshift_logging_es_ops_cpu_limit }}"
@@ -121,7 +134,10 @@
es_recover_after_nodes: "{{es_ops_recover_after_nodes}}"
es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}"
openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"
- es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({}) | map_from_pairs }}"
+ es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({}) }}"
+ es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim,root='elasticsearch_ops')}}"
+ es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards }}"
+ es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas }}"
with_indexed_items:
- "{{ es_ops_dc_pool | default([]) }}"
when:
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index 4c718805e..c7f4a2f93 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -3,6 +3,17 @@
msg: Only one Fluentd nodeselector key pair should be provided
when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1"
+- name: Set default image variables based on deployment_type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
+
+- name: Set logging image facts
+ set_fact:
+ openshift_logging_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}"
+ openshift_logging_image_version: "{{ openshift_logging_image_version | default(__openshift_logging_image_version) }}"
+
- name: Create temp directory for doing work in
command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
register: mktemp
@@ -12,6 +23,14 @@
- debug: msg="Created temp dir {{mktemp.stdout}}"
+- name: Create local temp directory for doing work in
+ local_action: command mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: local_tmp
+ changed_when: False
+ check_mode: no
+
+- debug: msg="Created local temp dir {{local_tmp.stdout}}"
+
- name: Copy the admin client config(s)
command: >
cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
@@ -37,3 +56,8 @@
tags: logging_cleanup
changed_when: False
check_mode: no
+
+- name: Cleaning up local temp dir
+ local_action: file path="{{local_tmp.stdout}}" state=absent
+ tags: logging_cleanup
+ changed_when: False
diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml
index 44dd5e894..7ab140357 100644
--- a/roles/openshift_logging/tasks/procure_server_certs.yaml
+++ b/roles/openshift_logging/tasks/procure_server_certs.yaml
@@ -11,12 +11,18 @@
- name: Trying to discover server cert variable name for {{ cert_info.procure_component }}
set_fact: procure_component_crt={{ lookup('env', '{{cert_info.procure_component}}' + '_crt') }}
- when: cert_info.hostnames is undefined and {{ cert_info.procure_component }}_crt is defined and {{ cert_info.procure_component }}_key is defined
+ when:
+ - cert_info.hostnames is undefined
+ - cert_info[ cert_info.procure_component + '_crt' ] is defined
+ - cert_info[ cert_info.procure_component + '_key' ] is defined
check_mode: no
- name: Trying to discover the server key variable name for {{ cert_info.procure_component }}
set_fact: procure_component_key={{ lookup('env', '{{cert_info.procure_component}}' + '_key') }}
- when: cert_info.hostnames is undefined and {{ cert_info.procure_component }}_crt is defined and {{ cert_info.procure_component }}_key is defined
+ when:
+ - cert_info.hostnames is undefined
+ - cert_info[ cert_info.procure_component + '_crt' ] is defined
+ - cert_info[ cert_info.procure_component + '_key' ] is defined
check_mode: no
- name: Creating signed server cert and key for {{ cert_info.procure_component }}
@@ -27,26 +33,26 @@
--signer-serial={{generated_certs_dir}}/ca.serial.txt
check_mode: no
when:
- - cert_info.hostnames is defined
- - not component_key_file.stat.exists
- - not component_cert_file.stat.exists
+ - cert_info.hostnames is defined
+ - not component_key_file.stat.exists
+ - not component_cert_file.stat.exists
- name: Copying server key for {{ cert_info.procure_component }} to generated certs directory
copy: content="{{procure_component_key}}" dest={{generated_certs_dir}}/{{cert_info.procure_component}}.key
check_mode: no
when:
- - cert_info.hostnames is undefined
- - "{{ cert_info.procure_component }}_crt is defined"
- - "{{ cert_info.procure_component }}_key is defined"
- - not component_key_file.stat.exists
- - not component_cert_file.stat.exists
+ - cert_info.hostnames is undefined
+ - cert_info[ cert_info.procure_component + '_crt' ] is defined
+ - cert_info[ cert_info.procure_component + '_key' ] is defined
+ - not component_key_file.stat.exists
+ - not component_cert_file.stat.exists
- name: Copying Server cert for {{ cert_info.procure_component }} to generated certs directory
copy: content="{{procure_component_crt}}" dest={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
check_mode: no
when:
- - cert_info.hostnames is undefined
- - "{{ cert_info.procure_component }}_crt is defined"
- - "{{ cert_info.procure_component }}_key is defined"
- - not component_key_file.stat.exists
- - not component_cert_file.stat.exists
+ - cert_info.hostnames is undefined
+ - cert_info[ cert_info.procure_component + '_crt' ] is defined
+ - cert_info[ cert_info.procure_component + '_key' ] is defined
+ - not component_key_file.stat.exists
+ - not component_cert_file.stat.exists
diff --git a/roles/openshift_logging/files/elasticsearch-logging.yml b/roles/openshift_logging/templates/elasticsearch-logging.yml.j2
index 377abe21f..499e77fb7 100644
--- a/roles/openshift_logging/files/elasticsearch-logging.yml
+++ b/roles/openshift_logging/templates/elasticsearch-logging.yml.j2
@@ -1,14 +1,25 @@
# you can override this using by setting a system property, for example -Des.logger.level=DEBUG
es.logger.level: INFO
-rootLogger: ${es.logger.level}, console, file
+rootLogger: ${es.logger.level}, {{root_logger}}
logger:
# log action execution errors for easier debugging
action: WARN
+
+ # deprecation logging, turn to DEBUG to see them
+ deprecation: WARN, deprecation_log_file
+
# reduce the logging for aws, too much is logged under the default INFO
com.amazonaws: WARN
+
io.fabric8.elasticsearch: ${PLUGIN_LOGLEVEL}
io.fabric8.kubernetes: ${PLUGIN_LOGLEVEL}
+ # aws will try to do some sketchy JMX stuff, but its not needed.
+ com.amazonaws.jmx.SdkMBeanRegistrySupport: ERROR
+ com.amazonaws.metrics.AwsSdkMetrics: ERROR
+
+ org.apache.http: INFO
+
# gateway
#gateway: DEBUG
#index.gateway: DEBUG
@@ -28,13 +39,14 @@ logger:
additivity:
index.search.slowlog: false
index.indexing.slowlog: false
+ deprecation: false
appender:
console:
type: console
layout:
type: consolePattern
- conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %.10000m%n"
file:
type: dailyRollingFile
@@ -44,16 +56,13 @@ appender:
type: pattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
- # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.
- # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html
- #file:
- #type: extrasRollingFile
- #file: ${path.logs}/${cluster.name}.log
- #rollingPolicy: timeBased
- #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz
- #layout:
- #type: pattern
- #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+ deprecation_log_file:
+ type: dailyRollingFile
+ file: ${path.logs}/${cluster.name}_deprecation.log
+ datePattern: "'.'yyyy-MM-dd"
+ layout:
+ type: pattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
index_search_slow_log_file:
type: dailyRollingFile
diff --git a/roles/openshift_logging/templates/elasticsearch.yml.j2 b/roles/openshift_logging/templates/elasticsearch.yml.j2
index f2d098f10..93c4d854c 100644
--- a/roles/openshift_logging/templates/elasticsearch.yml.j2
+++ b/roles/openshift_logging/templates/elasticsearch.yml.j2
@@ -6,9 +6,8 @@ script:
indexed: on
index:
- number_of_shards: 1
- number_of_replicas: 0
- auto_expand_replicas: 0-2
+ number_of_shards: {{ es_number_of_shards | default ('1') }}
+ number_of_replicas: {{ es_number_of_replicas | default ('0') }}
unassigned.node_left.delayed_timeout: 2m
translog:
flush_threshold_size: 256mb
@@ -29,6 +28,7 @@ cloud:
discovery:
type: kubernetes
zen.ping.multicast.enabled: false
+ zen.minimum_master_nodes: {{es_min_masters}}
gateway:
expected_master_nodes: ${NODE_QUORUM}
@@ -37,6 +37,8 @@ gateway:
recover_after_time: ${RECOVER_AFTER_TIME}
io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator", "system.admin"]
+io.fabric8.elasticsearch.kibana.mapping.app: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
+io.fabric8.elasticsearch.kibana.mapping.ops: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
openshift.config:
use_common_data_model: true
@@ -47,7 +49,7 @@ openshift.searchguard:
keystore.path: /etc/elasticsearch/secret/admin.jks
truststore.path: /etc/elasticsearch/secret/searchguard.truststore
-openshift.operations.allow_cluster_reader: {{allow_cluster_reader | default ('false')}}
+openshift.operations.allow_cluster_reader: {{allow_cluster_reader | default (false)}}
path:
data: /elasticsearch/persistent/${CLUSTER_NAME}/data
diff --git a/roles/openshift_logging/templates/es-storage-emptydir.partial b/roles/openshift_logging/templates/es-storage-emptydir.partial
new file mode 100644
index 000000000..ccd01a816
--- /dev/null
+++ b/roles/openshift_logging/templates/es-storage-emptydir.partial
@@ -0,0 +1 @@
+ emptyDir: {}
diff --git a/roles/openshift_logging/templates/es-storage-hostpath.partial b/roles/openshift_logging/templates/es-storage-hostpath.partial
new file mode 100644
index 000000000..07ddad9ba
--- /dev/null
+++ b/roles/openshift_logging/templates/es-storage-hostpath.partial
@@ -0,0 +1,2 @@
+ hostPath:
+ path: {{es_storage['path']}}
diff --git a/roles/openshift_logging/templates/es-storage-pvc.partial b/roles/openshift_logging/templates/es-storage-pvc.partial
new file mode 100644
index 000000000..fcbff68de
--- /dev/null
+++ b/roles/openshift_logging/templates/es-storage-pvc.partial
@@ -0,0 +1,2 @@
+ persistentVolumeClaim:
+ claimName: {{es_storage['pvc_claim']}}
diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging/templates/es.j2
index 81ae070be..16185fc1d 100644
--- a/roles/openshift_logging/templates/es.j2
+++ b/roles/openshift_logging/templates/es.j2
@@ -103,9 +103,4 @@ spec:
configMap:
name: logging-elasticsearch
- name: elasticsearch-storage
-{% if pvc_claim is defined and pvc_claim | trim | length > 0 %}
- persistentVolumeClaim:
- claimName: {{pvc_claim}}
-{% else %}
- emptyDir: {}
-{% endif %}
+{% include 'es-storage-'+ es_storage['kind'] + '.partial' %}
diff --git a/roles/openshift_logging/templates/pvc.j2 b/roles/openshift_logging/templates/pvc.j2
index f19a3a750..07d81afff 100644
--- a/roles/openshift_logging/templates/pvc.j2
+++ b/roles/openshift_logging/templates/pvc.j2
@@ -1,7 +1,7 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: {{obj_name}}
+ name: "{{obj_name}}"
labels:
logging-infra: support
{% if annotations is defined %}
diff --git a/roles/openshift_logging/vars/default_images.yml b/roles/openshift_logging/vars/default_images.yml
new file mode 100644
index 000000000..1a77808f6
--- /dev/null
+++ b/roles/openshift_logging/vars/default_images.yml
@@ -0,0 +1,3 @@
+---
+__openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
+__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
diff --git a/roles/openshift_logging/vars/main.yaml b/roles/openshift_logging/vars/main.yaml
index 07cc05683..e06625e3f 100644
--- a/roles/openshift_logging/vars/main.yaml
+++ b/roles/openshift_logging/vars/main.yaml
@@ -1,8 +1,12 @@
---
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
es_node_quorum: "{{openshift_logging_es_cluster_size|int/2 + 1}}"
+es_min_masters_default: "{{ (openshift_logging_es_cluster_size | int / 2 | round(0,'floor') + 1) | int }}"
+es_min_masters: "{{ (openshift_logging_es_cluster_size == 1) | ternary(1, es_min_masters_default)}}"
es_recover_after_nodes: "{{openshift_logging_es_cluster_size|int - 1}}"
es_recover_expected_nodes: "{{openshift_logging_es_cluster_size|int}}"
es_ops_node_quorum: "{{openshift_logging_es_ops_cluster_size|int/2 + 1}}"
es_ops_recover_after_nodes: "{{openshift_logging_es_ops_cluster_size|int - 1}}"
es_ops_recover_expected_nodes: "{{openshift_logging_es_ops_cluster_size|int}}"
+
+es_log_appenders: ['file', 'console']
diff --git a/roles/openshift_logging/vars/openshift-enterprise.yml b/roles/openshift_logging/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..9679d209a
--- /dev/null
+++ b/roles/openshift_logging/vars/openshift-enterprise.yml
@@ -0,0 +1,3 @@
+---
+__openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}"
+__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default(openshift_release | default ('3.5.0') ) }}"
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index 9a883feed..f67aee88b 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -39,7 +39,7 @@
delegate_to: "{{ openshift_master_host }}"
- name: Set node schedulability
- oadm_manage_node:
+ oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
schedulable: "{{ 'true' if openshift.node.schedulable | bool else 'false' }}"
retries: 10
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index af3e7eeec..907f25bc5 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -12,6 +12,7 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: lib_openshift
- role: openshift_master_facts
- role: openshift_hosted_facts
- role: openshift_master_certificates
@@ -40,8 +41,6 @@ dependencies:
port: 4001/tcp
when: groups.oo_etcd_to_config | default([]) | length == 0
- role: nickhammond.logrotate
-- role: nuage_master
- when: openshift.common.use_nuage | bool
- role: contiv
contiv_role: netmaster
when: openshift.common.use_contiv | bool
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 2ef61cddf..98e0da1a2 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -249,7 +249,7 @@
# Using curl here since the uri module requires python-httplib2 and
# wait_for port doesn't provide health information.
command: >
- curl --silent
+ curl --silent --tlsv1.2
{% if openshift.common.version_gte_3_2_or_1_2 | bool %}
--cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
{% else %}
diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml
index 1b3e0dba1..8f77d40ce 100644
--- a/roles/openshift_master/tasks/system_container.yml
+++ b/roles/openshift_master/tasks/system_container.yml
@@ -1,8 +1,4 @@
---
-- name: Load lib_openshift modules
- include_role:
- name: lib_openshift
-
- name: Pre-pull master system container image
command: >
atomic pull --storage=ostree {{ openshift.common.system_images_registry }}/{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index eef0f414e..155abd970 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -12,7 +12,7 @@ Requires=docker.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
LimitNOFILE=131072
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index aec48386e..938ac2a12 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -35,6 +35,15 @@ assetConfig:
keyFile: master.server.key
maxRequestsInFlight: 0
requestTimeoutSeconds: 0
+{% if openshift_master_min_tls_version is defined %}
+ minTLSVersion: {{ openshift_master_min_tls_version }}
+{% endif %}
+{% if openshift_master_cipher_suites is defined %}
+ cipherSuites:
+{% for cipher_suite in openshift_master_cipher_suites %}
+ - {{ cipher_suite }}
+{% endfor %}
+{% endif %}
{% if openshift_master_ha | bool %}
{% if openshift.master.audit_config | default(none) is not none and openshift.common.version_gte_3_2_or_1_2 | bool %}
auditConfig:{{ openshift.master.audit_config | to_padded_yaml(level=1) }}
@@ -256,5 +265,14 @@ servingInfo:
{% endfor %}
{% endfor %}
{% endif %}
+{% if openshift_master_min_tls_version is defined %}
+ minTLSVersion: {{ openshift_master_min_tls_version }}
+{% endif %}
+{% if openshift_master_cipher_suites is defined %}
+ cipherSuites:
+{% for cipher_suite in openshift_master_cipher_suites %}
+ - {{ cipher_suite }}
+{% endfor %}
+{% endif %}
volumeConfig:
dynamicProvisioningEnabled: {{ openshift.master.dynamic_provisioning_enabled }}
diff --git a/roles/openshift_master/templates/master_docker/master.docker.service.j2 b/roles/openshift_master/templates/master_docker/master.docker.service.j2
index be7644710..13381cd1a 100644
--- a/roles/openshift_master/templates/master_docker/master.docker.service.j2
+++ b/roles/openshift_master/templates/master_docker/master.docker.service.j2
@@ -8,7 +8,7 @@ Wants=etcd_container.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-master
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master
Restart=always
diff --git a/roles/openshift_master_certificates/README.md b/roles/openshift_master_certificates/README.md
index a80d47040..4758bbdfb 100644
--- a/roles/openshift_master_certificates/README.md
+++ b/roles/openshift_master_certificates/README.md
@@ -21,6 +21,7 @@ From this role:
|---------------------------------------|---------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|
| openshift_generated_configs_dir | `{{ openshift.common.config_base }}/generated-configs` | Directory in which per-master generated config directories will be created on the `openshift_ca_host`. |
| openshift_master_cert_subdir | `master-{{ openshift.common.hostname }}` | Directory within `openshift_generated_configs_dir` where per-master configurations will be placed on the `openshift_ca_host`. |
+| openshift_master_cert_expire_days | `730` (2 years) | Validity of the certificates in days. Works only with OpenShift version 1.5 (3.5) and later. |
| openshift_master_config_dir | `{{ openshift.common.config_base }}/master` | Master configuration directory in which certificates will be deployed on masters. |
| openshift_master_generated_config_dir | `{{ openshift_generated_configs_dir }}/{{ openshift_master_cert_subdir }` | Full path to the per-master generated config directory. |
diff --git a/roles/openshift_master_certificates/defaults/main.yml b/roles/openshift_master_certificates/defaults/main.yml
new file mode 100644
index 000000000..dba62c4ec
--- /dev/null
+++ b/roles/openshift_master_certificates/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+openshift_master_cert_expire_days: 730
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 61541acb8..d4c9a96ca 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -57,6 +57,9 @@
--hostnames={{ hostvars[item].openshift.common.all_hostnames | join(',') }}
--cert={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/master.server.crt
--key={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/master.server.key
+ {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
+ --expire-days={{ openshift_master_cert_expire_days }}
+ {% endif %}
--signer-cert={{ openshift_ca_cert }}
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
@@ -84,6 +87,9 @@
--signer-serial={{ openshift_ca_serial }}
--user=system:openshift-master
--basename=openshift-master
+ {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
+ --expire-days={{ openshift_master_cert_expire_days }}
+ {% endif %}
args:
creates: "{{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/openshift-master.kubeconfig"
with_items: "{{ hostvars
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index db24028cd..e570392ff 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -7,12 +7,19 @@ Custom filters for use in openshift-master
import copy
import sys
+# pylint import-error disabled because pylint cannot find the package
+# when installed in a virtualenv
from distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error
from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.filter.core import to_bool as ansible_bool
-from six import string_types
+
+# ansible.compat.six goes away with Ansible 2.4
+try:
+ from ansible.compat.six import string_types, u
+except ImportError:
+ from ansible.module_utils.six import string_types, u
import yaml
@@ -486,10 +493,10 @@ class FilterModule(object):
idp_list.append(idp_inst)
IdentityProviderBase.validate_idp_list(idp_list, openshift_version, deployment_type)
- return yaml.dump([idp.to_dict() for idp in idp_list],
- allow_unicode=True,
- default_flow_style=False,
- Dumper=AnsibleDumper)
+ return u(yaml.dump([idp.to_dict() for idp in idp_list],
+ allow_unicode=True,
+ default_flow_style=False,
+ Dumper=AnsibleDumper))
@staticmethod
def validate_pcs_cluster(data, masters=None):
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml
index db4a0e1fc..1d3db8a1a 100644
--- a/roles/openshift_metrics/defaults/main.yaml
+++ b/roles/openshift_metrics/defaults/main.yaml
@@ -1,8 +1,6 @@
---
openshift_metrics_start_cluster: True
openshift_metrics_install_metrics: True
-openshift_metrics_image_prefix: docker.io/openshift/origin-
-openshift_metrics_image_version: latest
openshift_metrics_startup_timeout: 500
openshift_metrics_hawkular_replicas: 1
@@ -47,7 +45,7 @@ openshift_metrics_node_id: nodename
openshift_metrics_project: openshift-infra
openshift_metrics_cassandra_pvc_prefix: "{{ openshift_hosted_metrics_storage_volume_name | default('metrics-cassandra') }}"
-openshift_metrics_cassandra_pvc_access: "{{ openshift_hosted_metrics_storage_access_modes | default('ReadWriteOnce') }}"
+openshift_metrics_cassandra_pvc_access: "{{ openshift_hosted_metrics_storage_access_modes | default(['ReadWriteOnce']) }}"
openshift_metrics_hawkular_user_write_access: False
diff --git a/roles/openshift_metrics/files/import_jks_certs.sh b/roles/openshift_metrics/files/import_jks_certs.sh
index c8d5bb3d2..f977b6dd6 100755
--- a/roles/openshift_metrics/files/import_jks_certs.sh
+++ b/roles/openshift_metrics/files/import_jks_certs.sh
@@ -20,12 +20,8 @@ set -ex
function import_certs() {
dir=$CERT_DIR
- hawkular_metrics_keystore_password=$(echo $METRICS_KEYSTORE_PASSWD | base64 -d)
- hawkular_cassandra_keystore_password=$(echo $CASSANDRA_KEYSTORE_PASSWD | base64 -d)
- hawkular_metrics_truststore_password=$(echo $METRICS_TRUSTSTORE_PASSWD | base64 -d)
- hawkular_cassandra_truststore_password=$(echo $CASSANDRA_TRUSTSTORE_PASSWD | base64 -d)
-
- cassandra_alias=`keytool -noprompt -list -keystore $dir/hawkular-cassandra.truststore -storepass ${hawkular_cassandra_truststore_password} | sed -n '7~2s/,.*$//p'`
+ hawkular_metrics_keystore_password=$(echo $METRICS_KEYSTORE_PASSWD | base64 --decode)
+ hawkular_metrics_truststore_password=$(echo $METRICS_TRUSTSTORE_PASSWD | base64 --decode)
hawkular_alias=`keytool -noprompt -list -keystore $dir/hawkular-metrics.truststore -storepass ${hawkular_metrics_truststore_password} | sed -n '7~2s/,.*$//p'`
if [ ! -f $dir/hawkular-metrics.keystore ]; then
@@ -39,56 +35,7 @@ function import_certs() {
-deststorepass $hawkular_metrics_keystore_password
fi
- if [ ! -f $dir/hawkular-cassandra.keystore ]; then
- echo "Creating the Hawkular Cassandra keystore from the PEM file"
- keytool -importkeystore -v \
- -srckeystore $dir/hawkular-cassandra.pkcs12 \
- -destkeystore $dir/hawkular-cassandra.keystore \
- -srcstoretype PKCS12 \
- -deststoretype JKS \
- -srcstorepass $hawkular_cassandra_keystore_password \
- -deststorepass $hawkular_cassandra_keystore_password
- fi
-
- if [[ ! ${cassandra_alias[*]} =~ hawkular-metrics ]]; then
- echo "Importing the Hawkular Certificate into the Cassandra Truststore"
- keytool -noprompt -import -v -trustcacerts -alias hawkular-metrics \
- -file $dir/hawkular-metrics.crt \
- -keystore $dir/hawkular-cassandra.truststore \
- -trustcacerts \
- -storepass $hawkular_cassandra_truststore_password
- fi
-
- if [[ ! ${hawkular_alias[*]} =~ hawkular-cassandra ]]; then
- echo "Importing the Cassandra Certificate into the Hawkular Truststore"
- keytool -noprompt -import -v -trustcacerts -alias hawkular-cassandra \
- -file $dir/hawkular-cassandra.crt \
- -keystore $dir/hawkular-metrics.truststore \
- -trustcacerts \
- -storepass $hawkular_metrics_truststore_password
- fi
-
- if [[ ! ${cassandra_alias[*]} =~ hawkular-cassandra ]]; then
- echo "Importing the Hawkular Cassandra Certificate into the Cassandra Truststore"
- keytool -noprompt -import -v -trustcacerts -alias hawkular-cassandra \
- -file $dir/hawkular-cassandra.crt \
- -keystore $dir/hawkular-cassandra.truststore \
- -trustcacerts \
- -storepass $hawkular_cassandra_truststore_password
- fi
-
- cert_alias_names=(ca metricca cassandraca)
-
- for cert_alias in ${cert_alias_names[*]}; do
- if [[ ! ${cassandra_alias[*]} =~ "$cert_alias" ]]; then
- echo "Importing the CA Certificate with alias $cert_alias into the Cassandra Truststore"
- keytool -noprompt -import -v -trustcacerts -alias $cert_alias \
- -file ${dir}/ca.crt \
- -keystore $dir/hawkular-cassandra.truststore \
- -trustcacerts \
- -storepass $hawkular_cassandra_truststore_password
- fi
- done
+ cert_alias_names=(ca metricca)
for cert_alias in ${cert_alias_names[*]}; do
if [[ ! ${hawkular_alias[*]} =~ "$cert_alias" ]]; then
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
new file mode 100644
index 000000000..ffb812271
--- /dev/null
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -0,0 +1,26 @@
+---
+- name: restart master
+ systemd: name={{ openshift.common.service_type }}-master state=restarted
+ when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
+ notify: Verify API Server
+
+- name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {% else %}
+ --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {% endif %}
+ {{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index 61a240a33..01fc1ef64 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -13,9 +13,6 @@
hostnames: hawkular-cassandra
changed_when: no
-- slurp: src={{ mktemp.stdout }}/hawkular-cassandra-truststore.pwd
- register: cassandra_truststore_password
-
- slurp: src={{ mktemp.stdout }}/hawkular-metrics-truststore.pwd
register: hawkular_truststore_password
@@ -67,11 +64,8 @@
- hawkular-metrics.pwd
- hawkular-metrics.htpasswd
- hawkular-cassandra.crt
+ - hawkular-cassandra.key
- hawkular-cassandra.pem
- - hawkular-cassandra.keystore
- - hawkular-cassandra-keystore.pwd
- - hawkular-cassandra.truststore
- - hawkular-cassandra-truststore.pwd
changed_when: false
- set_fact:
@@ -136,38 +130,21 @@
- name: generate cassandra secret template
template:
src: secret.j2
- dest: "{{ mktemp.stdout }}/templates/cassandra_secrets.yaml"
+ dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-certs.yaml"
vars:
- name: hawkular-cassandra-secrets
+ name: hawkular-cassandra-certs
labels:
- metrics-infra: hawkular-cassandra
+ metrics-infra: hawkular-cassandra-certs
+ annotations:
+ service.alpha.openshift.io/originating-service-name: hawkular-cassandra
data:
- cassandra.keystore: >
- {{ hawkular_secrets['hawkular-cassandra.keystore'] }}
- cassandra.keystore.password: >
- {{ hawkular_secrets['hawkular-cassandra-keystore.pwd'] }}
- cassandra.keystore.alias: "{{ 'hawkular-cassandra'|b64encode }}"
- cassandra.truststore: >
- {{ hawkular_secrets['hawkular-cassandra.truststore'] }}
- cassandra.truststore.password: >
- {{ hawkular_secrets['hawkular-cassandra-truststore.pwd'] }}
- cassandra.pem: >
- {{ hawkular_secrets['hawkular-cassandra.pem'] }}
- when: name not in metrics_secrets
- changed_when: no
-
-- name: generate cassandra-certificate secret template
- template:
- src: secret.j2
- dest: "{{ mktemp.stdout }}/templates/cassandra_certificate.yaml"
- vars:
- name: hawkular-cassandra-certificate
- labels:
- metrics-infra: hawkular-cassandra
- data:
- cassandra.certificate: >
+ tls.crt: >
{{ hawkular_secrets['hawkular-cassandra.crt'] }}
- cassandra-ca.certificate: >
- {{ hawkular_secrets['hawkular-cassandra.pem'] }}
- when: name not in metrics_secrets.stdout_lines
+ tls.key: >
+ {{ hawkular_secrets['hawkular-cassandra.key'] }}
+ tls.peer.truststore.crt: >
+ {{ hawkular_secrets['hawkular-cassandra.crt'] }}
+ tls.client.truststore.crt: >
+ {{ hawkular_secrets['hawkular-metrics.crt'] }}
+ when: name not in metrics_secrets
changed_when: no
diff --git a/roles/openshift_metrics/tasks/import_jks_certs.yaml b/roles/openshift_metrics/tasks/import_jks_certs.yaml
index 2a67dad0e..e098145e9 100644
--- a/roles/openshift_metrics/tasks/import_jks_certs.yaml
+++ b/roles/openshift_metrics/tasks/import_jks_certs.yaml
@@ -1,12 +1,4 @@
---
-- stat: path="{{mktemp.stdout}}/hawkular-cassandra.keystore"
- register: cassandra_keystore
- check_mode: no
-
-- stat: path="{{mktemp.stdout}}/hawkular-cassandra.truststore"
- register: cassandra_truststore
- check_mode: no
-
- stat: path="{{mktemp.stdout}}/hawkular-metrics.keystore"
register: metrics_keystore
check_mode: no
@@ -19,9 +11,6 @@
- slurp: src={{ mktemp.stdout }}/hawkular-metrics-keystore.pwd
register: metrics_keystore_password
- - slurp: src={{ mktemp.stdout }}/hawkular-cassandra-keystore.pwd
- register: cassandra_keystore_password
-
- fetch:
dest: "{{local_tmp.stdout}}/"
src: "{{ mktemp.stdout }}/{{item}}"
@@ -29,18 +18,14 @@
changed_when: False
with_items:
- hawkular-metrics.pkcs12
- - hawkular-cassandra.pkcs12
- hawkular-metrics.crt
- - hawkular-cassandra.crt
- ca.crt
- local_action: command {{role_path}}/files/import_jks_certs.sh
environment:
CERT_DIR: "{{local_tmp.stdout}}"
METRICS_KEYSTORE_PASSWD: "{{metrics_keystore_password.content}}"
- CASSANDRA_KEYSTORE_PASSWD: "{{cassandra_keystore_password.content}}"
METRICS_TRUSTSTORE_PASSWD: "{{hawkular_truststore_password.content}}"
- CASSANDRA_TRUSTSTORE_PASSWD: "{{cassandra_truststore_password.content}}"
changed_when: False
- copy:
@@ -49,6 +34,4 @@
with_fileglob: "{{local_tmp.stdout}}/*.*store"
when: not metrics_keystore.stat.exists or
- not metrics_truststore.stat.exists or
- not cassandra_keystore.stat.exists or
- not cassandra_truststore.stat.exists
+ not metrics_truststore.stat.exists
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index 66c81562b..a467c1a51 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -22,6 +22,9 @@
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
changed_when: false
+- set_fact: openshift_metrics_cassandra_pvc_prefix="hawkular-metrics"
+ when: "not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''"
+
- name: generate hawkular-cassandra persistent volume claims
template:
src: pvc.j2
@@ -30,8 +33,7 @@
obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ item }}"
labels:
metrics-infra: hawkular-cassandra
- access_modes:
- - "{{ openshift_metrics_cassandra_pvc_access }}"
+ access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when:
@@ -49,8 +51,7 @@
metrics-infra: hawkular-cassandra
annotations:
volume.alpha.kubernetes.io/storage-class: dynamic
- access_modes:
- - "{{ openshift_metrics_cassandra_pvc_access }}"
+ access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when: openshift_metrics_cassandra_storage_type == 'dynamic'
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index 66a3abdbd..ffe6f63a2 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -34,6 +34,8 @@
file_content: "{{ item.content | b64decode | from_yaml }}"
with_items: "{{ object_defs.results }}"
+- include: update_master_config.yaml
+
- command: >
{{openshift.common.client_binary}}
--config={{mktemp.stdout}}/admin.kubeconfig
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index 1eebff3bf..c8d222c60 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -1,4 +1,16 @@
---
+
+- name: Set default image variables based on deployment_type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
+
+- name: Set metrics image facts
+ set_fact:
+ openshift_metrics_image_prefix: "{{ openshift_metrics_image_prefix | default(__openshift_metrics_image_prefix) }}"
+ openshift_metrics_image_version: "{{ openshift_metrics_image_version | default(__openshift_metrics_image_version) }}"
+
- name: Create temp directory for doing work in on target
command: mktemp -td openshift-metrics-ansible-XXXXXX
register: mktemp
diff --git a/roles/openshift_metrics/tasks/update_master_config.yaml b/roles/openshift_metrics/tasks/update_master_config.yaml
new file mode 100644
index 000000000..20fc45fd4
--- /dev/null
+++ b/roles/openshift_metrics/tasks/update_master_config.yaml
@@ -0,0 +1,9 @@
+---
+- name: Adding metrics route information to metricsPublicURL
+ modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: assetConfig.metricsPublicURL
+ yaml_value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics"
+ notify: restart master
+ tags:
+ - update_master_config
diff --git a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
index 504476dc4..889317847 100644
--- a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
@@ -48,11 +48,6 @@ spec:
- "--require_node_auth=true"
- "--enable_client_encryption=true"
- "--require_client_auth=true"
- - "--keystore_file=/secret/cassandra.keystore"
- - "--keystore_password_file=/secret/cassandra.keystore.password"
- - "--truststore_file=/secret/cassandra.truststore"
- - "--truststore_password_file=/secret/cassandra.truststore.password"
- - "--cassandra_pem_file=/secret/cassandra.pem"
env:
- name: CASSANDRA_MASTER
value: "{{ master }}"
@@ -60,6 +55,10 @@ spec:
value: "/cassandra_data"
- name: JVM_OPTS
value: "-Dcassandra.commitlog.ignorereplayerrors=true"
+ - name: TRUSTSTORE_NODES_AUTHORITIES
+ value: "/hawkular-cassandra-certs/tls.peer.truststore.crt"
+ - name: TRUSTSTORE_CLIENT_AUTHORITIES
+ value: "/hawkular-cassandra-certs/tls.client.truststore.crt"
- name: POD_NAMESPACE
valueFrom:
fieldRef:
@@ -76,12 +75,12 @@ spec:
volumeMounts:
- name: cassandra-data
mountPath: "/cassandra_data"
- - name: hawkular-cassandra-secrets
- mountPath: "/secret"
-{% if ((openshift_metrics_cassandra_limits_cpu is defined and openshift_metrics_cassandra_limits_cpu is not none)
+ - name: hawkular-cassandra-certs
+ mountPath: "/hawkular-cassandra-certs"
+{% if ((openshift_metrics_cassandra_limits_cpu is defined and openshift_metrics_cassandra_limits_cpu is not none)
or (openshift_metrics_cassandra_limits_memory is defined and openshift_metrics_cassandra_limits_memory is not none)
or (openshift_metrics_cassandra_requests_cpu is defined and openshift_metrics_cassandra_requests_cpu is not none)
- or (openshift_metrics_cassandra_requests_memory is defined and openshift_metrics_cassandra_requests_memory is not none))
+ or (openshift_metrics_cassandra_requests_memory is defined and openshift_metrics_cassandra_requests_memory is not none))
%}
resources:
{% if (openshift_metrics_cassandra_limits_cpu is not none
@@ -95,8 +94,8 @@ spec:
memory: "{{openshift_metrics_cassandra_limits_memory}}"
{% endif %}
{% endif %}
-{% if (openshift_metrics_cassandra_requests_cpu is not none
- or openshift_metrics_cassandra_requests_memory is not none)
+{% if (openshift_metrics_cassandra_requests_cpu is not none
+ or openshift_metrics_cassandra_requests_memory is not none)
%}
requests:
{% if openshift_metrics_cassandra_requests_cpu is not none %}
@@ -129,6 +128,6 @@ spec:
persistentVolumeClaim:
claimName: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ node }}"
{% endif %}
- - name: hawkular-cassandra-secrets
+ - name: hawkular-cassandra-certs
secret:
- secretName: hawkular-cassandra-secrets
+ secretName: hawkular-cassandra-certs
diff --git a/roles/openshift_metrics/templates/pvc.j2 b/roles/openshift_metrics/templates/pvc.j2
index 8fbfa8b5d..c2e56ba21 100644
--- a/roles/openshift_metrics/templates/pvc.j2
+++ b/roles/openshift_metrics/templates/pvc.j2
@@ -1,10 +1,10 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: {{obj_name}}
+ name: "{{obj_name}}"
{% if labels is not defined %}
labels:
- logging-infra: support
+ metrics-infra: support
{% elif labels %}
labels:
{% for key, value in labels.iteritems() %}
diff --git a/roles/openshift_metrics/templates/secret.j2 b/roles/openshift_metrics/templates/secret.j2
index 370890c7d..5b9dba122 100644
--- a/roles/openshift_metrics/templates/secret.j2
+++ b/roles/openshift_metrics/templates/secret.j2
@@ -2,6 +2,12 @@ apiVersion: v1
kind: Secret
metadata:
name: "{{ name }}"
+{% if annotations is defined%}
+ annotations:
+{% for key, value in annotations.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
labels:
{% for k, v in labels.iteritems() %}
{{ k }}: {{ v }}
diff --git a/roles/openshift_metrics/vars/default_images.yml b/roles/openshift_metrics/vars/default_images.yml
new file mode 100644
index 000000000..678c4104c
--- /dev/null
+++ b/roles/openshift_metrics/vars/default_images.yml
@@ -0,0 +1,3 @@
+---
+__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('docker.io/openshift/origin-') }}"
+__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default('latest') }}"
diff --git a/roles/openshift_metrics/vars/main.yaml b/roles/openshift_metrics/vars/main.yaml
index 4a3724e3f..47aa76dd2 100644
--- a/roles/openshift_metrics/vars/main.yaml
+++ b/roles/openshift_metrics/vars/main.yaml
@@ -8,3 +8,4 @@ openshift_metrics_cassandra_storage_types:
- emptydir
- pv
- dynamic
+- nfs
diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..f28c3ce48
--- /dev/null
+++ b/roles/openshift_metrics/vars/openshift-enterprise.yml
@@ -0,0 +1,3 @@
+---
+__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}"
+__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default(openshift_release | default ('3.5.0') ) }}"
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index fffbf2994..bd95f8526 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -6,10 +6,6 @@ os_firewall_allow:
port: 80/tcp
- service: https
port: 443/tcp
-- service: Openshift kubelet ReadOnlyPort
- port: 10255/tcp
-- service: Openshift kubelet ReadOnlyPort udp
- port: 10255/udp
- service: OpenShift OVS sdn
port: 4789/udp
when: openshift.node.use_openshift_sdn | bool
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 10036abed..0da41d0c1 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -12,6 +12,7 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: lib_openshift
- role: openshift_common
- role: openshift_clock
- role: openshift_docker
@@ -25,10 +26,6 @@ dependencies:
port: 80/tcp
- service: https
port: 443/tcp
- - service: Openshift kubelet ReadOnlyPort
- port: 10255/tcp
- - service: Openshift kubelet ReadOnlyPort udp
- port: 10255/udp
- role: os_firewall
os_firewall_allow:
- service: OpenShift OVS sdn
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 691227915..626248306 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -153,7 +153,7 @@
# Using curl here since the uri module requires python-httplib2 and
# wait_for port doesn't provide health information.
command: >
- curl --silent --cacert {{ openshift.common.config_base }}/node/ca.crt
+ curl --silent --tlsv1.2 --cacert {{ openshift.common.config_base }}/node/ca.crt
{{ openshift_node_master_api_url }}/healthz/ready
args:
# Disables the following warning:
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index abe139418..d99f657bc 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -1,8 +1,4 @@
---
-- name: Load lib_openshift modules
- include_role:
- name: lib_openshift
-
- name: Pre-pull node system container image
command: >
atomic pull --storage=ostree {{ openshift.common.system_images_registry }}/{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index b76ce8797..8cfa5a026 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -1,8 +1,4 @@
---
-- name: Load lib_openshift modules
- include_role:
- name: lib_openshift
-
- name: Pre-pull OpenVSwitch system container image
command: >
atomic pull --storage=ostree {{ openshift.common.system_images_registry }}/{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index d3c3feb68..f2f929232 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -40,6 +40,15 @@ servingInfo:
certFile: server.crt
clientCA: ca.crt
keyFile: server.key
+{% if openshift_node_min_tls_version is defined %}
+ minTLSVersion: {{ openshift_node_min_tls_version }}
+{% endif %}
+{% if openshift_node_cipher_suites is defined %}
+ cipherSuites:
+{% for cipher_suite in openshift_node_cipher_suites %}
+ - {{ cipher_suite }}
+{% endfor %}
+{% endif %}
volumeDirectory: {{ openshift.common.data_dir }}/openshift.local.volumes
proxyArguments:
proxy-mode:
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 6ec88f85e..c42bdb7c3 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -6,6 +6,8 @@ PartOf=docker.service
Requires=docker.service
{% if openshift.common.use_openshift_sdn %}
Requires=openvswitch.service
+After=ovsdb-server.service
+After=ovs-vswitchd.service
{% endif %}
Wants={{ openshift.common.service_type }}-master.service
Requires={{ openshift.common.service_type }}-node-dep.service
@@ -15,7 +17,7 @@ After={{ openshift.common.service_type }}-node-dep.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
SyslogIdentifier={{ openshift.common.service_type }}-node
diff --git a/roles/openshift_node_certificates/README.md b/roles/openshift_node_certificates/README.md
index f4215950f..fef2f0783 100644
--- a/roles/openshift_node_certificates/README.md
+++ b/roles/openshift_node_certificates/README.md
@@ -23,6 +23,7 @@ From this role:
|-------------------------------------|-------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|
| openshift_generated_configs_dir | `{{ openshift.common.config_base }}/generated-configs` | Directory in which per-node generated config directories will be created on the `openshift_ca_host`. |
| openshift_node_cert_subdir | `node-{{ openshift.common.hostname }}` | Directory within `openshift_generated_configs_dir` where per-node certificates will be placed on the `openshift_ca_host`. |
+| openshift_node_cert_expire_days | `730` (2 years) | Validity of the certificates in days. Works only with OpenShift version 1.5 (3.5) and later. |
| openshift_node_config_dir | `{{ openshift.common.config_base }}/node` | Node configuration directory in which certificates will be deployed on nodes. |
| openshift_node_generated_config_dir | `{{ openshift_generated_configs_dir }}/{{ openshift_node_cert_subdir }` | Full path to the per-node generated config directory. |
diff --git a/roles/openshift_node_certificates/defaults/main.yml b/roles/openshift_node_certificates/defaults/main.yml
new file mode 100644
index 000000000..70a38b844
--- /dev/null
+++ b/roles/openshift_node_certificates/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+openshift_node_cert_expire_days: 730
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index 4cb89aba2..9120915b2 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -66,6 +66,9 @@
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
--user=system:node:{{ hostvars[item].openshift.common.hostname }}
+ {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
+ --expire-days={{ openshift_node_cert_expire_days }}
+ {% endif %}
args:
creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}"
with_items: "{{ hostvars
@@ -79,6 +82,9 @@
{{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert
--cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt
--key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.key
+ {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
+ --expire-days={{ openshift_node_cert_expire_days }}
+ {% endif %}
--overwrite=true
--hostnames={{ hostvars[item].openshift.common.hostname }},{{ hostvars[item].openshift.common.public_hostname }},{{ hostvars[item].openshift.common.ip }},{{ hostvars[item].openshift.common.public_ip }}
--signer-cert={{ openshift_ca_cert }}
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index 2f79931df..6ae8dbc12 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -51,24 +51,39 @@
failed_when: false
when: openshift.common.is_containerized | bool
+- name: Stop rpm based services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - "{{ openshift.common.service_type }}-node"
+ - openvswitch
+ failed_when: false
+ when: not openshift.common.is_containerized | bool
+
- name: Upgrade openvswitch
package:
name: openvswitch
state: latest
- register: ovs_pkg
when: not openshift.common.is_containerized | bool
- name: Restart openvswitch
systemd:
name: openvswitch
- state: restarted
+ state: started
when:
- not openshift.common.is_containerized | bool
- - ovs_pkg | changed
# Mandatory Docker restart, ensure all containerized services are running:
- include: docker/restart.yml
+- name: Update oreg value
+ yedit:
+ src: "{{ openshift.common.config_base }}/node/node-config.yaml"
+ key: 'imageConfig.format'
+ value: "{{ oreg_url }}"
+ when: oreg_url is defined
+
- name: Restart rpm node service
service:
name: "{{ openshift.common.service_type }}-node"
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
index 6ec88f85e..0ff398152 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
@@ -15,7 +15,7 @@ After={{ openshift.common.service_type }}-node-dep.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
SyslogIdentifier={{ openshift.common.service_type }}-node
diff --git a/roles/openshift_projects/tasks/main.yml b/roles/openshift_projects/tasks/main.yml
deleted file mode 100644
index 30d58afd3..000000000
--- a/roles/openshift_projects/tasks/main.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
- changed_when: False
-
-- name: Determine if projects exist
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- get projects {{ item.key }} -o json
- with_dict: "{{ openshift_projects }}"
- failed_when: false
- changed_when: false
- register: project_test
-
-- name: Create projects
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
- new-project {{ item.item.key }}
- {% if item.item.value.default_node_selector | default(none) != none %}
- {{ '--node-selector=' ~ item.item.value.default_node_selector }}
- {% endif %}
- when: item.rc == 1
- with_items:
- - "{{ project_test.results }}"
-
-- name: Update project default node selector if necessary
- command: >
- {{ openshift.common.client_binary }}
- --config={{ mktemp.stdout }}/admin.kubeconfig patch namespace {{ item.item.key }}
- -p '{"metadata": {"annotations": {"openshift.io/node-selector": "{{ item.item.value.default_node_selector }}"}}}'
- when: "{{ item.rc == 0 and item.item.value.default_node_selector | default(none) != none
- and item.item.value.default_node_selector | default(none) != (item.stdout | from_json).metadata.annotations['openshift.io/node-selector'] | default(none) }}"
- with_items:
- - "{{ project_test.results }}"
- register: annotate_project
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
diff --git a/roles/openshift_projects/vars/main.yml b/roles/openshift_projects/vars/main.yml
deleted file mode 100644
index 9967e26f4..000000000
--- a/roles/openshift_projects/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
diff --git a/roles/openshift_repos/meta/main.yml b/roles/openshift_repos/meta/main.yml
index cc18c453c..1b043863b 100644
--- a/roles/openshift_repos/meta/main.yml
+++ b/roles/openshift_repos/meta/main.yml
@@ -11,4 +11,5 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- role: openshift_sanitize_inventory
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index ffb760bfe..84a0905cc 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -4,10 +4,6 @@
path: /run/ostree-booted
register: ostree_booted
-- assert:
- that: openshift_deployment_type in known_openshift_deployment_types
- msg: "openshift_deployment_type must be one of {{ known_openshift_deployment_types }}"
-
- block:
- name: Ensure libselinux-python is installed
package: name=libselinux-python state=present
diff --git a/roles/openshift_sanitize_inventory/README.md b/roles/openshift_sanitize_inventory/README.md
new file mode 100644
index 000000000..23f6b84fc
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/README.md
@@ -0,0 +1,37 @@
+OpenShift Inventory
+===================
+
+Provides a role to validate and normalize the variables the user has
+provided. This role should run before pretty much everything else so that
+this kind of logic only has to be in one place. However, complicated
+business logic should usually be left to other roles.
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+None
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+OpenShift dev (dev@lists.openshift.redhat.com)
diff --git a/roles/openshift_projects/meta/main.yml b/roles/openshift_sanitize_inventory/meta/main.yml
index 107a70b83..f5b37186e 100644
--- a/roles/openshift_projects/meta/main.yml
+++ b/roles/openshift_sanitize_inventory/meta/main.yml
@@ -1,15 +1,15 @@
---
galaxy_info:
- author: Jason DeTiberus
- description: OpenShift Projects
+ author: OpenShift dev
+ description:
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.9
+ min_ansible_version: 1.8
platforms:
- name: EL
versions:
- 7
categories:
- cloud
-dependencies:
-- { role: openshift_facts }
+ - system
+dependencies: []
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
new file mode 100644
index 000000000..b944c8991
--- /dev/null
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -0,0 +1,35 @@
+---
+- name: Standardize on latest variable names
+ set_fact:
+ # goal is to deprecate deployment_type in favor of openshift_deployment_type.
+ # both will be accepted for now, but code should refer to the new name.
+ # TODO: once this is well-documented, add deprecation notice if using old name.
+ deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"
+ openshift_deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"
+
+- name: Abort when deployment type is invalid
+ # this variable is required; complain early and clearly if it is invalid.
+ when: openshift_deployment_type not in known_openshift_deployment_types
+ fail:
+ msg: |-
+ Please set openshift_deployment_type to one of:
+ {{ known_openshift_deployment_types | join(', ') }}
+
+- name: Normalize openshift_release
+ set_fact:
+ # Normalize release if provided, e.g. "v3.5" => "3.5"
+ # Currently this is not required to be defined for all installs, and the
+ # `openshift_version` role can generally figure out the specific version
+ # that gets installed (e.g. 3.5.0.1). So consider this the user's expressed
+ # intent (if any), not the authoritative version that will be installed.
+ openshift_release: "{{ openshift_release | string | regex_replace('^v', '') }}"
+ when: openshift_release is defined
+
+- name: Abort when openshift_release is invalid
+ when:
+ - openshift_release is defined
+ - not openshift_release | match('\d+(\.\d+){1,3}$')
+ fail:
+ msg: |-
+ openshift_release is "{{ openshift_release }}" which is not a valid version string.
+ Please set it to a version string like "3.4".
diff --git a/roles/openshift_repos/vars/main.yml b/roles/openshift_sanitize_inventory/vars/main.yml
index da48e42c1..da48e42c1 100644
--- a/roles/openshift_repos/vars/main.yml
+++ b/roles/openshift_sanitize_inventory/vars/main.yml
diff --git a/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml b/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml
deleted file mode 100644
index b8cbe9a84..000000000
--- a/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-####
-#
-# OSE 3.0.z did not have 'oadm policy add-scc-to-user'.
-#
-####
-
-- name: tmp dir for openshift
- file:
- path: /tmp/openshift
- state: directory
- owner: root
- mode: 0700
-
-- name: Create service account configs
- template:
- src: serviceaccount.j2
- dest: "/tmp/openshift/{{ item }}-serviceaccount.yaml"
- with_items: '{{ openshift_serviceaccounts_names }}'
-
-- name: Get current security context constraints
- shell: >
- {{ openshift.common.client_binary }} get scc privileged -o yaml
- --output-version=v1 > /tmp/openshift/scc.yaml
- changed_when: false
-
-- name: Add security context constraint for {{ item }}
- lineinfile:
- dest: /tmp/openshift/scc.yaml
- line: "- system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}"
- insertafter: "^users:$"
- when: "item.1.rc == 0 and 'system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}' not in {{ (item.1.stdout | from_yaml).users }}"
- with_nested:
- - '{{ openshift_serviceaccounts_names }}'
- - '{{ scc_test.results }}'
-
-- name: Apply new scc rules for service accounts
- command: "{{ openshift.common.client_binary }} update -f /tmp/openshift/scc.yaml --api-version=v1"
diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml
deleted file mode 100644
index 1d570fa5b..000000000
--- a/roles/openshift_serviceaccounts/tasks/main.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- name: create the service account
- oc_serviceaccount:
- name: "{{ item }}"
- namespace: "{{ openshift_serviceaccounts_namespace }}"
- state: present
- with_items:
- - "{{ openshift_serviceaccounts_names }}"
-
-- name: test if scc needs to be updated
- command: >
- {{ openshift.common.client_binary }} get scc {{ item }} -o yaml
- changed_when: false
- failed_when: false
- register: scc_test
- with_items: "{{ openshift_serviceaccounts_sccs }}"
-
-- name: Grant the user access to the appropriate scc
- command: >
- {{ openshift.common.client_binary }} adm policy add-scc-to-user
- {{ item.1.item }} system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}
- when: "openshift.common.version_gte_3_1_or_1_1 and item.1.rc == 0 and 'system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}' not in {{ (item.1.stdout | from_yaml).users | default([]) }}"
- with_nested:
- - "{{ openshift_serviceaccounts_names }}"
- - "{{ scc_test.results }}"
-
-- include: legacy_add_scc_to_user.yml
- when: not openshift.common.version_gte_3_1_or_1_1
diff --git a/roles/openshift_serviceaccounts/templates/serviceaccount.j2 b/roles/openshift_serviceaccounts/templates/serviceaccount.j2
deleted file mode 100644
index c5f12421f..000000000
--- a/roles/openshift_serviceaccounts/templates/serviceaccount.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: {{ item.0 }}
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 0f2a660a7..c3d001bb4 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -9,24 +9,55 @@
# be used by default. Users must indicate what they want.
- fail:
msg: "Must specify openshift_release or openshift_image_tag in inventory to install origin. (suggestion: add openshift_release=\"1.2\" to inventory)"
- when: is_containerized | bool and openshift.common.deployment_type == 'origin' and openshift_release is not defined and openshift_image_tag is not defined
+ when:
+ - is_containerized | bool
+ - openshift.common.deployment_type == 'origin'
+ - openshift_release is not defined
+ - openshift_image_tag is not defined
# Normalize some values that we need in a certain format that might be confusing:
- set_fact:
openshift_release: "{{ openshift_release[1:] }}"
- when: openshift_release is defined and openshift_release[0] == 'v'
+ when:
+ - openshift_release is defined
+ - openshift_release[0] == 'v'
- set_fact:
openshift_release: "{{ openshift_release | string }}"
when: openshift_release is defined
-- set_fact:
- openshift_image_tag: "{{ 'v' + openshift_image_tag }}"
- when: openshift_image_tag is defined and openshift_image_tag[0] != 'v' and openshift_image_tag != 'latest'
-
-- set_fact:
- openshift_pkg_version: "{{ '-' + openshift_pkg_version }}"
- when: openshift_pkg_version is defined and openshift_pkg_version[0] != '-'
+# Verify that the image tag is in a valid format
+- block:
+
+ # Verifies that when the deployment type is origin the version:
+ # - starts with a v
+ # - Has 3 integers seperated by dots
+ # It also allows for optional trailing data which:
+ # - must start with a dash
+ # - may contain numbers, letters, dashes and dots.
+ - name: Verify Origin openshift_image_tag is valid
+ assert:
+ that:
+ - "{{ openshift_image_tag|match('(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)') }}"
+ msg: "openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1"
+ when: openshift.common.deployment_type == 'origin'
+
+ # Verifies that when the deployment type is openshift-enterprise the version:
+ # - starts with a v
+ # - Has at least 2 integers seperated by dots
+ # It also allows for optional trailing data which:
+ # - must start with a dash
+ # - may contain numbers
+ - name: Verify Enterprise openshift_image_tag is valid
+ assert:
+ that:
+ - "{{ openshift_image_tag|match('(^v\\d+\\.\\d+[\\.\\d+]*(-\\d+)?$)') }}"
+ msg: "openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4"
+ when: openshift.common.deployment_type == 'openshift-enterprise'
+
+ when:
+ - openshift_image_tag is defined
+ - openshift_image_tag != "latest"
# Make sure we copy this to a fact if given a var:
- set_fact:
@@ -38,7 +69,10 @@
- name: Use openshift.common.version fact as version to configure if already installed
set_fact:
openshift_version: "{{ openshift.common.version }}"
- when: openshift.common.version is defined and openshift_version is not defined and openshift_protect_installed_version | bool
+ when:
+ - openshift.common.version is defined
+ - openshift_version is not defined
+ - openshift_protect_installed_version | bool
- name: Set openshift_version for rpm installation
include: set_version_rpm.yml
@@ -48,17 +82,39 @@
include: set_version_containerized.yml
when: is_containerized | bool
+# Warn if the user has provided an openshift_image_tag but is not doing a containerized install
+# NOTE: This will need to be modified/removed for future container + rpm installations work.
+- name: Warn if openshift_image_tag is defined when not doing a containerized install
+ debug:
+ msg: >
+ openshift_image_tag is used for containerized installs. If you are trying to
+ specify an image for a non-container install see oreg_url.
+ when:
+ - not is_containerized | bool
+ - openshift_image_tag is defined
+
+
# At this point we know openshift_version is set appropriately. Now we set
# openshift_image_tag and openshift_pkg_version, so all roles can always assume
# each of this variables *will* be set correctly and can use them per their
# intended purpose.
-- set_fact:
- openshift_image_tag: v{{ openshift_version }}
+- block:
+ - debug:
+ msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}"
+
+ - set_fact:
+ openshift_image_tag: v{{ openshift_version }}
+
when: openshift_image_tag is not defined
-- set_fact:
- openshift_pkg_version: -{{ openshift_version }}
+- block:
+ - debug:
+ msg: "openshift_pkg_version was not defined. Falling back to -{{ openshift_version }}"
+
+ - set_fact:
+ openshift_pkg_version: -{{ openshift_version }}
+
when: openshift_pkg_version is not defined
- fail:
@@ -75,13 +131,18 @@
- fail:
msg: "No OpenShift version available, please ensure your systems are fully registered and have access to appropriate yum repositories."
- when: not is_containerized | bool and openshift_version == '0.0'
+ when:
+ - not is_containerized | bool
+ - openshift_version == '0.0'
# We can't map an openshift_release to full rpm version like we can with containers, make sure
# the rpm version we looked up matches the release requested and error out if not.
- fail:
msg: "Detected OpenShift version {{ openshift_version }} does not match requested openshift_release {{ openshift_release }}. You may need to adjust your yum repositories, inventory, or run the appropriate OpenShift upgrade playbook."
- when: not is_containerized | bool and openshift_release is defined and not openshift_version.startswith(openshift_release) | bool
+ when:
+ - not is_containerized | bool
+ - openshift_release is defined
+ - not openshift_version.startswith(openshift_release) | bool
# The end result of these three variables is quite important so make sure they are displayed and logged:
- debug: var=openshift_release
diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml
index cd0f20ae9..0ec4c49d6 100644
--- a/roles/openshift_version/tasks/set_version_containerized.yml
+++ b/roles/openshift_version/tasks/set_version_containerized.yml
@@ -4,12 +4,16 @@
# Expects a leading "v" in inventory, strip it off here unless
# openshift_image_tag=latest
openshift_version: "{{ openshift_image_tag[1:].split('-')[0] if openshift_image_tag != 'latest' else openshift_image_tag }}"
- when: openshift_image_tag is defined and openshift_version is not defined
+ when:
+ - openshift_image_tag is defined
+ - openshift_version is not defined
- name: Set containerized version to configure if openshift_release specified
set_fact:
openshift_version: "{{ openshift_release }}"
- when: openshift_release is defined and openshift_version is not defined
+ when:
+ - openshift_release is defined
+ - openshift_version is not defined
- name: Lookup latest containerized version if no version specified
command: >
@@ -20,7 +24,10 @@
# Origin latest = pre-release version (i.e. v1.3.0-alpha.1-321-gb095e3a)
- set_fact:
openshift_version: "{{ (cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2] | join('-'))[1:] }}"
- when: openshift_version is not defined and openshift.common.deployment_type == 'origin' and cli_image_version.stdout_lines[0].split('-') | length > 1
+ when:
+ - openshift_version is not defined
+ - openshift.common.deployment_type == 'origin'
+ - cli_image_version.stdout_lines[0].split('-') | length > 1
- set_fact:
openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
@@ -32,11 +39,15 @@
command: >
docker run --rm {{ openshift.common.cli_image }}:v{{ openshift_version }} version
register: cli_image_version
- when: openshift_version is defined and openshift_version.split('.') | length == 2
+ when:
+ - openshift_version is defined
+ - openshift_version.split('.') | length == 2
- set_fact:
openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2][1:] | join('-') if openshift.common.deployment_type == 'origin' else cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
- when: openshift_version is defined and openshift_version.split('.') | length == 2
+ when:
+ - openshift_version is defined
+ - openshift_version.split('.') | length == 2
# We finally have the specific version. Now we clean up any strange
# dangly +c0mm1t-offset tags in the version. See also,
diff --git a/roles/openshift_version/tasks/set_version_rpm.yml b/roles/openshift_version/tasks/set_version_rpm.yml
index 7fa74e24f..c7604af1a 100644
--- a/roles/openshift_version/tasks/set_version_rpm.yml
+++ b/roles/openshift_version/tasks/set_version_rpm.yml
@@ -3,16 +3,46 @@
set_fact:
# Expects a leading "-" in inventory, strip it off here, and remove trailing release,
openshift_version: "{{ openshift_pkg_version[1:].split('-')[0] }}"
- when: openshift_pkg_version is defined and openshift_version is not defined
+ when:
+ - openshift_pkg_version is defined
+ - openshift_version is not defined
+
+# if {{ openshift.common.service_type}}-excluder is enabled,
+# the repoquery for {{ openshift.common.service_type}} will not work.
+# Thus, create a temporary yum,conf file where exclude= is set to an empty list
+- name: Create temporary yum.conf file
+ command: mktemp -d /tmp/yum.conf.XXXXXX
+ register: yum_conf_temp_file_result
+
+- set_fact:
+ yum_conf_temp_file: "{{yum_conf_temp_file_result.stdout}}/yum.conf"
+
+- name: Copy yum.conf into the temporary file
+ copy:
+ src: /etc/yum.conf
+ dest: "{{ yum_conf_temp_file }}"
+ remote_src: True
+
+- name: Clear the exclude= list in the temporary yum.conf
+ lineinfile:
+ # since ansible 2.3 s/dest/path
+ dest: "{{ yum_conf_temp_file }}"
+ regexp: '^exclude='
+ line: 'exclude='
- name: Gather common package version
command: >
- {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type}}"
+ {{ repoquery_cmd }} --config "{{ yum_conf_temp_file }}" --qf '%{version}' "{{ openshift.common.service_type}}"
register: common_version
failed_when: false
changed_when: false
when: openshift_version is not defined
+- name: Delete the temporary yum.conf
+ file:
+ path: "{{ yum_conf_temp_file_result.stdout }}"
+ state: absent
+
- set_fact:
openshift_version: "{{ common_version.stdout | default('0.0', True) }}"
when: openshift_version is not defined
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index a9a69f73c..4b2979887 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -34,6 +34,22 @@
pause: seconds=10
when: result | changed
+- name: Restart polkitd
+ systemd:
+ name: polkit
+ state: restarted
+ when: result | changed
+
+# Fix suspected race between firewalld and polkit BZ1436964
+- name: Wait for polkit action to have been created
+ command: pkaction --action-id=org.fedoraproject.FirewallD1.config.info
+ ignore_errors: true
+ register: pkaction
+ changed_when: false
+ until: pkaction.rc == 0
+ retries: 6
+ delay: 10
+
- name: Add firewalld allow rules
firewalld:
port: "{{ item.port }}"
diff --git a/test/openshift_version_tests.py b/test/openshift_version_tests.py
new file mode 100644
index 000000000..52e9a9888
--- /dev/null
+++ b/test/openshift_version_tests.py
@@ -0,0 +1,72 @@
+""" Tests for the openshift_version Ansible filter module. """
+# pylint: disable=missing-docstring,invalid-name
+
+import os
+import sys
+import unittest
+
+sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../filter_plugins/")] + sys.path
+
+# pylint: disable=import-error
+import openshift_version # noqa: E402
+
+
+class OpenShiftVersionTests(unittest.TestCase):
+
+ openshift_version_filters = openshift_version.FilterModule()
+
+ # Static tests for legacy filters.
+ legacy_gte_tests = [{'name': 'oo_version_gte_3_1_or_1_1',
+ 'positive_enterprise_version': '3.2.0',
+ 'negative_enterprise_version': '3.0.0',
+ 'positive_origin_version': '1.2.0',
+ 'negative_origin_version': '1.0.0'},
+ {'name': 'oo_version_gte_3_1_1_or_1_1_1',
+ 'positive_enterprise_version': '3.2.0',
+ 'negative_enterprise_version': '3.1.0',
+ 'positive_origin_version': '1.2.0',
+ 'negative_origin_version': '1.1.0'},
+ {'name': 'oo_version_gte_3_2_or_1_2',
+ 'positive_enterprise_version': '3.3.0',
+ 'negative_enterprise_version': '3.1.0',
+ 'positive_origin_version': '1.3.0',
+ 'negative_origin_version': '1.1.0'},
+ {'name': 'oo_version_gte_3_3_or_1_3',
+ 'positive_enterprise_version': '3.4.0',
+ 'negative_enterprise_version': '3.2.0',
+ 'positive_origin_version': '1.4.0',
+ 'negative_origin_version': '1.2.0'},
+ {'name': 'oo_version_gte_3_4_or_1_4',
+ 'positive_enterprise_version': '3.5.0',
+ 'negative_enterprise_version': '3.3.0',
+ 'positive_origin_version': '1.5.0',
+ 'negative_origin_version': '1.3.0'},
+ {'name': 'oo_version_gte_3_5_or_1_5',
+ 'positive_enterprise_version': '3.6.0',
+ 'negative_enterprise_version': '3.4.0',
+ 'positive_origin_version': '1.6.0',
+ 'negative_origin_version': '1.4.0'}]
+
+ def test_legacy_gte_filters(self):
+ for test in self.legacy_gte_tests:
+ for deployment_type in ['enterprise', 'origin']:
+ # Test negative case per deployment_type
+ self.assertFalse(
+ self.openshift_version_filters._filters[test['name']](
+ test["negative_{}_version".format(deployment_type)], deployment_type))
+ # Test positive case per deployment_type
+ self.assertTrue(
+ self.openshift_version_filters._filters[test['name']](
+ test["positive_{}_version".format(deployment_type)], deployment_type))
+
+ def test_gte_filters(self):
+ for major, minor_start, minor_end in self.openshift_version_filters.versions:
+ for minor in range(minor_start, minor_end):
+ # Test positive case
+ self.assertTrue(
+ self.openshift_version_filters._filters["oo_version_gte_{}_{}".format(major, minor)](
+ "{}.{}".format(major, minor + 1)))
+ # Test negative case
+ self.assertFalse(
+ self.openshift_version_filters._filters["oo_version_gte_{}_{}".format(major, minor)](
+ "{}.{}".format(major, minor)))
diff --git a/utils/src/ooinstall/ansible_plugins/facts_callback.py b/utils/src/ooinstall/ansible_plugins/facts_callback.py
index c881e4b92..433e29dde 100644
--- a/utils/src/ooinstall/ansible_plugins/facts_callback.py
+++ b/utils/src/ooinstall/ansible_plugins/facts_callback.py
@@ -7,6 +7,12 @@ import yaml
from ansible.plugins.callback import CallbackBase
from ansible.parsing.yaml.dumper import AnsibleDumper
+# ansible.compat.six goes away with Ansible 2.4
+try:
+ from ansible.compat.six import u
+except ImportError:
+ from ansible.module_utils.six import u
+
# pylint: disable=super-init-not-called
class CallbackModule(CallbackBase):
@@ -39,10 +45,10 @@ class CallbackModule(CallbackBase):
facts = abridged_result['result']['ansible_facts']['openshift']
hosts_yaml = {}
hosts_yaml[res._host.get_name()] = facts
- to_dump = yaml.dump(hosts_yaml,
- allow_unicode=True,
- default_flow_style=False,
- Dumper=AnsibleDumper)
+ to_dump = u(yaml.dump(hosts_yaml,
+ allow_unicode=True,
+ default_flow_style=False,
+ Dumper=AnsibleDumper))
os.write(self.hosts_yaml, to_dump)
def v2_runner_on_skipped(self, res):